comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
That is definitely another viable options to what I said in another comment.
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
Pinging @JonathanGiles as this will be a common occurrence moving forward as service update and change model types, what is the strategy for handle these scenarios.
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
Added a Javadoc detailing this
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
I don't entirely understand the question. Can someone please provide a bit more context?
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
@JonathanGiles This constructor is technically just there to be used by internal code but needs to be public (since it is used in the base package). The failedHandles parameter was recently added since GA (it will need to go into our next release) and this is the new constructor we use in internal code. Should we deprecate the old constructor?
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
One option is: We could use as a general approach for such common occurrence moving forward, is to use Fluent API approach ( using setters returns Model itself ) , ref: https://dev.to/awwsmm/build-a-fluent-interface-in-java-in-less-than-5-minutes-m7e
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
Sync vs async we need to baseline its impact.
protected void performWorkload(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber, long i) throws InterruptedException { int index = (int) (i % docsToRead.size()); Document doc = docsToRead.get(index); String partitionKeyValue = doc.getId(); Mono<CosmosAsyncItemResponse> result = cosmosAsyncContainer.getItem(doc.getId(), partitionKeyValue).read(); concurrencyControlSemaphore.acquire(); if (configuration.getOperationType() == Configuration.Operation.ReadThroughput) { result.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); } else { LatencySubscriber<CosmosAsyncItemResponse> latencySubscriber = new LatencySubscriber<>(baseSubscriber); latencySubscriber.context = latency.time(); result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); } }
concurrencyControlSemaphore.acquire();
protected void performWorkload(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber, long i) throws InterruptedException { int index = (int) (i % docsToRead.size()); Document doc = docsToRead.get(index); String partitionKeyValue = doc.getId(); Mono<CosmosAsyncItemResponse> result = cosmosAsyncContainer.getItem(doc.getId(), partitionKeyValue).read(); concurrencyControlSemaphore.acquire(); if (configuration.getOperationType() == Configuration.Operation.ReadThroughput) { result.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); } else { LatencySubscriber<CosmosAsyncItemResponse> latencySubscriber = new LatencySubscriber<>(baseSubscriber); latencySubscriber.context = latency.time(); result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber; LatencySubscriber(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber; LatencySubscriber(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
Why use fully qualifed instead of importing this?
public void testMSIEndpointWithSystemAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder().build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); }
org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT));
public void testMSIEndpointWithSystemAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder().build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); }
class ManagedIdentityCredentialLiveTest { private static final String AZURE_VAULT_URL = "AZURE_VAULT_URL"; private static final String VAULT_SECRET_NAME = "secret"; private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone(); @Test public void testMSIEndpointWithSystemAssigned() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder().build(); StepVerifier.create(client.authenticateToManagedIdentityEndpoint( CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT), CONFIGURATION.get(Configuration.PROPERTY_MSI_SECRET), new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test @Test public void testMSIEndpointWithUserAssigned() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); StepVerifier.create(client.authenticateToManagedIdentityEndpoint( CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT), CONFIGURATION.get(Configuration.PROPERTY_MSI_SECRET), new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testMSIEndpointWithUserAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); 
org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } @Test public void testIMDSEndpointWithSystemAssigned() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder().build(); StepVerifier.create(client.authenticateToIMDSEndpoint( new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testIMDSEndpointWithSystemAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder().build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } @Test public void testIMDSEndpointWithUserAssigned() throws Exception { 
org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); StepVerifier.create(client.authenticateToIMDSEndpoint( new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testIMDSEndpointWithUserAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } private boolean checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return false; } HttpURLConnection connection = null; try { URL url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); return true; } catch (Exception e) { return false; } finally { if (connection != null) { connection.disconnect(); } } } }
class ManagedIdentityCredentialLiveTest { private static final String AZURE_VAULT_URL = "AZURE_VAULT_URL"; private static final String VAULT_SECRET_NAME = "secret"; private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone(); @Test public void testMSIEndpointWithSystemAssigned() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder().build(); StepVerifier.create(client.authenticateToManagedIdentityEndpoint( CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT), CONFIGURATION.get(Configuration.PROPERTY_MSI_SECRET), new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test @Test public void testMSIEndpointWithUserAssigned() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); StepVerifier.create(client.authenticateToManagedIdentityEndpoint( CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT), CONFIGURATION.get(Configuration.PROPERTY_MSI_SECRET), new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testMSIEndpointWithUserAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_MSI_ENDPOINT)); 
org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } @Test public void testIMDSEndpointWithSystemAssigned() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder().build(); StepVerifier.create(client.authenticateToIMDSEndpoint( new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testIMDSEndpointWithSystemAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeTrue(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID) == null); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder().build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } @Test public void testIMDSEndpointWithUserAssigned() throws Exception { 
org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); IdentityClient client = new IdentityClientBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); StepVerifier.create(client.authenticateToIMDSEndpoint( new TokenRequestContext().addScopes("https: .expectNextMatches(accessToken -> accessToken != null && accessToken.getToken() != null) .verifyComplete(); } @Test public void testIMDSEndpointWithUserAssignedAccessKeyVault() throws Exception { org.junit.Assume.assumeTrue(checkIMDSAvailable()); org.junit.Assume.assumeNotNull(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)); org.junit.Assume.assumeNotNull(CONFIGURATION.get(AZURE_VAULT_URL)); ManagedIdentityCredential credential = new ManagedIdentityCredentialBuilder() .clientId(CONFIGURATION.get(Configuration.PROPERTY_AZURE_CLIENT_ID)) .build(); SecretClient client = new SecretClientBuilder() .credential(credential) .vaultUrl(CONFIGURATION.get(AZURE_VAULT_URL)) .buildClient(); KeyVaultSecret secret = client.getSecret(VAULT_SECRET_NAME); Assert.assertNotNull(secret); Assert.assertEquals(VAULT_SECRET_NAME, secret.getName()); Assert.assertNotNull(secret.getValue()); } private boolean checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return false; } HttpURLConnection connection = null; try { URL url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); return true; } catch (Exception e) { return false; } finally { if (connection != null) { connection.disconnect(); } } } }
sure on my list.
protected void performWorkload(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber, long i) throws InterruptedException { int index = (int) (i % docsToRead.size()); Document doc = docsToRead.get(index); String partitionKeyValue = doc.getId(); Mono<CosmosAsyncItemResponse> result = cosmosAsyncContainer.getItem(doc.getId(), partitionKeyValue).read(); concurrencyControlSemaphore.acquire(); if (configuration.getOperationType() == Configuration.Operation.ReadThroughput) { result.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); } else { LatencySubscriber<CosmosAsyncItemResponse> latencySubscriber = new LatencySubscriber<>(baseSubscriber); latencySubscriber.context = latency.time(); result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); } }
concurrencyControlSemaphore.acquire();
protected void performWorkload(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber, long i) throws InterruptedException { int index = (int) (i % docsToRead.size()); Document doc = docsToRead.get(index); String partitionKeyValue = doc.getId(); Mono<CosmosAsyncItemResponse> result = cosmosAsyncContainer.getItem(doc.getId(), partitionKeyValue).read(); concurrencyControlSemaphore.acquire(); if (configuration.getOperationType() == Configuration.Operation.ReadThroughput) { result.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); } else { LatencySubscriber<CosmosAsyncItemResponse> latencySubscriber = new LatencySubscriber<>(baseSubscriber); latencySubscriber.context = latency.time(); result.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber; LatencySubscriber(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber; LatencySubscriber(BaseSubscriber<CosmosAsyncItemResponse> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
An empty string seems different than a collection with no items in it (which is what was returned before). Is this correct? It may account for why those JSON files are breaking.
public String getLabelFilter() { return labelFilter == null ? "" : labelFilter; }
return labelFilter == null ? "" : labelFilter;
public String getLabelFilter() { return labelFilter; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Thanks. It should be the reason
public String getLabelFilter() { return labelFilter == null ? "" : labelFilter; }
return labelFilter == null ? "" : labelFilter;
public String getLabelFilter() { return labelFilter; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Is this correct then?
public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; }
return keyFilter == null ? "" : keyFilter;
public String getKeyFilter() { return keyFilter; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
/**
 * A set of options for selecting {@link ConfigurationSetting ConfigurationSettings} from the
 * App Configuration service: key/label filter expressions, an optional point-in-time snapshot
 * datetime, and the fields to populate on returned settings.
 */
class SettingSelector {
    private String keyFilter;
    private String labelFilter;
    private SettingFields[] fields;
    private String acceptDatetime;

    /**
     * Creates a selector that matches every {@link ConfigurationSetting} and populates all of
     * its properties in responses.
     */
    public SettingSelector() {
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} keys.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any key.</li>
     * <li>{@code "abc1234"} matches the key "abc1234" exactly.</li>
     * <li>{@code "abc*"} matches keys starting with "abc".</li>
     * <li>{@code "*abc*"} matches keys containing "abc".</li>
     * <li>{@code "abc,def"} matches the keys "abc" or "def".</li>
     * </ul>
     *
     * @param keyFilter The expressions to filter ConfigurationSetting keys on.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setKeyFilter(String keyFilter) {
        this.keyFilter = keyFilter;
        return this;
    }

    /**
     * Gets the expressions used to filter settings by their label. A {@code null} or empty
     * value matches settings regardless of label.
     *
     * @return The labels used to filter GET requests from the service.
     */
    public String getLabelFilter() {
        return labelFilter;
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} labels.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any label.</li>
     * <li>{@code "\0"} matches settings without a label (the default label).</li>
     * <li>{@code "abc*"} matches labels starting with "abc".</li>
     * <li>{@code "*abc*"} matches labels containing "abc".</li>
     * <li>{@code "abc,def"} matches the labels "abc" or "def".</li>
     * </ul>
     *
     * @param labelFilter The expressions to filter ConfigurationSetting labels on. A
     * {@code null} or {@code ""} value matches settings regardless of their label.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setLabelFilter(String labelFilter) {
        this.labelFilter = labelFilter;
        return this;
    }

    /**
     * Gets the datetime used for point-in-time queries. When set, settings are returned as
     * they existed at that moment; otherwise current values are returned.
     *
     * @return The currently configured datetime, formatted per RFC 1123.
     */
    public String getAcceptDateTime() {
        return this.acceptDatetime;
    }

    /**
     * Requests configuration setting values as they existed at the given datetime. When not
     * set, current values are returned.
     *
     * @param datetime The point in time at which to read configuration settings.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setAcceptDatetime(OffsetDateTime datetime) {
        // Stored as an RFC 1123 string, the format sent to the service.
        this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime);
        return this;
    }

    /**
     * Gets the fields on {@link ConfigurationSetting} to populate in responses. An empty array
     * means the service returns settings with all of their fields populated.
     *
     * @return The set of {@link ConfigurationSetting} fields to return for a GET request.
     */
    public SettingFields[] getFields() {
        if (fields == null) {
            return new SettingFields[0];
        }
        // Defensive copy so callers cannot mutate this selector's state.
        return CoreUtils.clone(fields);
    }

    /**
     * Sets the fields to populate on returned settings. When none are set, the service returns
     * ConfigurationSettings with a default set of properties.
     *
     * @param fields The fields to select for the query response.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setFields(SettingFields... fields) {
        this.fields = fields;
        return this;
    }

    @Override
    public String toString() {
        final String fieldsText = CoreUtils.isNullOrEmpty(this.fields)
            ? "ALL_FIELDS"
            : CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper);
        return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)",
            this.keyFilter, this.labelFilter, this.acceptDatetime, fieldsText);
    }
}
Let me double-check with the feature team. The label is optional, so it could be null, but the key should be required.
public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; }
return keyFilter == null ? "" : keyFilter;
public String getKeyFilter() { return keyFilter; }
/**
 * A set of options for selecting {@link ConfigurationSetting ConfigurationSettings} from the
 * App Configuration service: key/label filter expressions, an optional point-in-time snapshot
 * datetime, and the fields to populate on returned settings.
 */
class SettingSelector {
    private String keyFilter;
    private String labelFilter;
    private SettingFields[] fields;
    private String acceptDatetime;

    /**
     * Creates a selector that matches every {@link ConfigurationSetting} and populates all of
     * its properties in responses.
     */
    public SettingSelector() {
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} keys.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any key.</li>
     * <li>{@code "abc1234"} matches the key "abc1234" exactly.</li>
     * <li>{@code "abc*"} matches keys starting with "abc".</li>
     * <li>{@code "*abc*"} matches keys containing "abc".</li>
     * <li>{@code "abc,def"} matches the keys "abc" or "def".</li>
     * </ul>
     *
     * @param keyFilter The expressions to filter ConfigurationSetting keys on.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setKeyFilter(String keyFilter) {
        this.keyFilter = keyFilter;
        return this;
    }

    /**
     * Gets the expressions used to filter settings by their label. A {@code null} or empty
     * value matches settings regardless of label.
     *
     * @return The labels used to filter GET requests from the service.
     */
    public String getLabelFilter() {
        return labelFilter;
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} labels.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any label.</li>
     * <li>{@code "\0"} matches settings without a label (the default label).</li>
     * <li>{@code "abc*"} matches labels starting with "abc".</li>
     * <li>{@code "*abc*"} matches labels containing "abc".</li>
     * <li>{@code "abc,def"} matches the labels "abc" or "def".</li>
     * </ul>
     *
     * @param labelFilter The expressions to filter ConfigurationSetting labels on. A
     * {@code null} or {@code ""} value matches settings regardless of their label.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setLabelFilter(String labelFilter) {
        this.labelFilter = labelFilter;
        return this;
    }

    /**
     * Gets the datetime used for point-in-time queries. When set, settings are returned as
     * they existed at that moment; otherwise current values are returned.
     *
     * @return The currently configured datetime, formatted per RFC 1123.
     */
    public String getAcceptDateTime() {
        return this.acceptDatetime;
    }

    /**
     * Requests configuration setting values as they existed at the given datetime. When not
     * set, current values are returned.
     *
     * @param datetime The point in time at which to read configuration settings.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setAcceptDatetime(OffsetDateTime datetime) {
        // Stored as an RFC 1123 string, the format sent to the service.
        this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime);
        return this;
    }

    /**
     * Gets the fields on {@link ConfigurationSetting} to populate in responses. An empty array
     * means the service returns settings with all of their fields populated.
     *
     * @return The set of {@link ConfigurationSetting} fields to return for a GET request.
     */
    public SettingFields[] getFields() {
        if (fields == null) {
            return new SettingFields[0];
        }
        // Defensive copy so callers cannot mutate this selector's state.
        return CoreUtils.clone(fields);
    }

    /**
     * Sets the fields to populate on returned settings. When none are set, the service returns
     * ConfigurationSettings with a default set of properties.
     *
     * @param fields The fields to select for the query response.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setFields(SettingFields... fields) {
        this.fields = fields;
        return this;
    }

    @Override
    public String toString() {
        final String fieldsText = CoreUtils.isNullOrEmpty(this.fields)
            ? "ALL_FIELDS"
            : CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper);
        return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)",
            this.keyFilter, this.labelFilter, this.acceptDatetime, fieldsText);
    }
}
/**
 * A set of options for selecting {@link ConfigurationSetting ConfigurationSettings} from the
 * App Configuration service: key/label filter expressions, an optional point-in-time snapshot
 * datetime, and the fields to populate on returned settings.
 */
class SettingSelector {
    private String keyFilter;
    private String labelFilter;
    private SettingFields[] fields;
    private String acceptDatetime;

    /**
     * Creates a selector that matches every {@link ConfigurationSetting} and populates all of
     * its properties in responses.
     */
    public SettingSelector() {
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} keys.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any key.</li>
     * <li>{@code "abc1234"} matches the key "abc1234" exactly.</li>
     * <li>{@code "abc*"} matches keys starting with "abc".</li>
     * <li>{@code "*abc*"} matches keys containing "abc".</li>
     * <li>{@code "abc,def"} matches the keys "abc" or "def".</li>
     * </ul>
     *
     * @param keyFilter The expressions to filter ConfigurationSetting keys on.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setKeyFilter(String keyFilter) {
        this.keyFilter = keyFilter;
        return this;
    }

    /**
     * Gets the expressions used to filter settings by their label. A {@code null} or empty
     * value matches settings regardless of label.
     *
     * @return The labels used to filter GET requests from the service.
     */
    public String getLabelFilter() {
        return labelFilter;
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} labels.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any label.</li>
     * <li>{@code "\0"} matches settings without a label (the default label).</li>
     * <li>{@code "abc*"} matches labels starting with "abc".</li>
     * <li>{@code "*abc*"} matches labels containing "abc".</li>
     * <li>{@code "abc,def"} matches the labels "abc" or "def".</li>
     * </ul>
     *
     * @param labelFilter The expressions to filter ConfigurationSetting labels on. A
     * {@code null} or {@code ""} value matches settings regardless of their label.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setLabelFilter(String labelFilter) {
        this.labelFilter = labelFilter;
        return this;
    }

    /**
     * Gets the datetime used for point-in-time queries. When set, settings are returned as
     * they existed at that moment; otherwise current values are returned.
     *
     * @return The currently configured datetime, formatted per RFC 1123.
     */
    public String getAcceptDateTime() {
        return this.acceptDatetime;
    }

    /**
     * Requests configuration setting values as they existed at the given datetime. When not
     * set, current values are returned.
     *
     * @param datetime The point in time at which to read configuration settings.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setAcceptDatetime(OffsetDateTime datetime) {
        // Stored as an RFC 1123 string, the format sent to the service.
        this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime);
        return this;
    }

    /**
     * Gets the fields on {@link ConfigurationSetting} to populate in responses. An empty array
     * means the service returns settings with all of their fields populated.
     *
     * @return The set of {@link ConfigurationSetting} fields to return for a GET request.
     */
    public SettingFields[] getFields() {
        if (fields == null) {
            return new SettingFields[0];
        }
        // Defensive copy so callers cannot mutate this selector's state.
        return CoreUtils.clone(fields);
    }

    /**
     * Sets the fields to populate on returned settings. When none are set, the service returns
     * ConfigurationSettings with a default set of properties.
     *
     * @param fields The fields to select for the query response.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setFields(SettingFields... fields) {
        this.fields = fields;
        return this;
    }

    @Override
    public String toString() {
        final String fieldsText = CoreUtils.isNullOrEmpty(this.fields)
            ? "ALL_FIELDS"
            : CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper);
        return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)",
            this.keyFilter, this.labelFilter, this.acceptDatetime, fieldsText);
    }
}
In this case, the behaviour is different because you returned an empty set before (nothing in it). But now, you're returning an empty string. Is it possible to have a key with an empty string? If it is possible, then you'll never get a "key is required" message, because users who don't specify a key, will get a configuration value with the key of `""` returned.
public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; }
return keyFilter == null ? "" : keyFilter;
public String getKeyFilter() { return keyFilter; }
/**
 * A set of options for selecting {@link ConfigurationSetting ConfigurationSettings} from the
 * App Configuration service: key/label filter expressions, an optional point-in-time snapshot
 * datetime, and the fields to populate on returned settings.
 */
class SettingSelector {
    private String keyFilter;
    private String labelFilter;
    private SettingFields[] fields;
    private String acceptDatetime;

    /**
     * Creates a selector that matches every {@link ConfigurationSetting} and populates all of
     * its properties in responses.
     */
    public SettingSelector() {
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} keys.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any key.</li>
     * <li>{@code "abc1234"} matches the key "abc1234" exactly.</li>
     * <li>{@code "abc*"} matches keys starting with "abc".</li>
     * <li>{@code "*abc*"} matches keys containing "abc".</li>
     * <li>{@code "abc,def"} matches the keys "abc" or "def".</li>
     * </ul>
     *
     * @param keyFilter The expressions to filter ConfigurationSetting keys on.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setKeyFilter(String keyFilter) {
        this.keyFilter = keyFilter;
        return this;
    }

    /**
     * Gets the expressions used to filter settings by their label. A {@code null} or empty
     * value matches settings regardless of label.
     *
     * @return The labels used to filter GET requests from the service.
     */
    public String getLabelFilter() {
        return labelFilter;
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} labels.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any label.</li>
     * <li>{@code "\0"} matches settings without a label (the default label).</li>
     * <li>{@code "abc*"} matches labels starting with "abc".</li>
     * <li>{@code "*abc*"} matches labels containing "abc".</li>
     * <li>{@code "abc,def"} matches the labels "abc" or "def".</li>
     * </ul>
     *
     * @param labelFilter The expressions to filter ConfigurationSetting labels on. A
     * {@code null} or {@code ""} value matches settings regardless of their label.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setLabelFilter(String labelFilter) {
        this.labelFilter = labelFilter;
        return this;
    }

    /**
     * Gets the datetime used for point-in-time queries. When set, settings are returned as
     * they existed at that moment; otherwise current values are returned.
     *
     * @return The currently configured datetime, formatted per RFC 1123.
     */
    public String getAcceptDateTime() {
        return this.acceptDatetime;
    }

    /**
     * Requests configuration setting values as they existed at the given datetime. When not
     * set, current values are returned.
     *
     * @param datetime The point in time at which to read configuration settings.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setAcceptDatetime(OffsetDateTime datetime) {
        // Stored as an RFC 1123 string, the format sent to the service.
        this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime);
        return this;
    }

    /**
     * Gets the fields on {@link ConfigurationSetting} to populate in responses. An empty array
     * means the service returns settings with all of their fields populated.
     *
     * @return The set of {@link ConfigurationSetting} fields to return for a GET request.
     */
    public SettingFields[] getFields() {
        if (fields == null) {
            return new SettingFields[0];
        }
        // Defensive copy so callers cannot mutate this selector's state.
        return CoreUtils.clone(fields);
    }

    /**
     * Sets the fields to populate on returned settings. When none are set, the service returns
     * ConfigurationSettings with a default set of properties.
     *
     * @param fields The fields to select for the query response.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setFields(SettingFields... fields) {
        this.fields = fields;
        return this;
    }

    @Override
    public String toString() {
        final String fieldsText = CoreUtils.isNullOrEmpty(this.fields)
            ? "ALL_FIELDS"
            : CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper);
        return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)",
            this.keyFilter, this.labelFilter, this.acceptDatetime, fieldsText);
    }
}
/**
 * A set of options for selecting {@link ConfigurationSetting ConfigurationSettings} from the
 * App Configuration service: key/label filter expressions, an optional point-in-time snapshot
 * datetime, and the fields to populate on returned settings.
 */
class SettingSelector {
    private String keyFilter;
    private String labelFilter;
    private SettingFields[] fields;
    private String acceptDatetime;

    /**
     * Creates a selector that matches every {@link ConfigurationSetting} and populates all of
     * its properties in responses.
     */
    public SettingSelector() {
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} keys.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any key.</li>
     * <li>{@code "abc1234"} matches the key "abc1234" exactly.</li>
     * <li>{@code "abc*"} matches keys starting with "abc".</li>
     * <li>{@code "*abc*"} matches keys containing "abc".</li>
     * <li>{@code "abc,def"} matches the keys "abc" or "def".</li>
     * </ul>
     *
     * @param keyFilter The expressions to filter ConfigurationSetting keys on.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setKeyFilter(String keyFilter) {
        this.keyFilter = keyFilter;
        return this;
    }

    /**
     * Gets the expressions used to filter settings by their label. A {@code null} or empty
     * value matches settings regardless of label.
     *
     * @return The labels used to filter GET requests from the service.
     */
    public String getLabelFilter() {
        return labelFilter;
    }

    /**
     * Sets the expressions used to filter {@link ConfigurationSetting} labels.
     *
     * <p>Examples:
     * <ul>
     * <li>{@code "*"} matches any label.</li>
     * <li>{@code "\0"} matches settings without a label (the default label).</li>
     * <li>{@code "abc*"} matches labels starting with "abc".</li>
     * <li>{@code "*abc*"} matches labels containing "abc".</li>
     * <li>{@code "abc,def"} matches the labels "abc" or "def".</li>
     * </ul>
     *
     * @param labelFilter The expressions to filter ConfigurationSetting labels on. A
     * {@code null} or {@code ""} value matches settings regardless of their label.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setLabelFilter(String labelFilter) {
        this.labelFilter = labelFilter;
        return this;
    }

    /**
     * Gets the datetime used for point-in-time queries. When set, settings are returned as
     * they existed at that moment; otherwise current values are returned.
     *
     * @return The currently configured datetime, formatted per RFC 1123.
     */
    public String getAcceptDateTime() {
        return this.acceptDatetime;
    }

    /**
     * Requests configuration setting values as they existed at the given datetime. When not
     * set, current values are returned.
     *
     * @param datetime The point in time at which to read configuration settings.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setAcceptDatetime(OffsetDateTime datetime) {
        // Stored as an RFC 1123 string, the format sent to the service.
        this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime);
        return this;
    }

    /**
     * Gets the fields on {@link ConfigurationSetting} to populate in responses. An empty array
     * means the service returns settings with all of their fields populated.
     *
     * @return The set of {@link ConfigurationSetting} fields to return for a GET request.
     */
    public SettingFields[] getFields() {
        if (fields == null) {
            return new SettingFields[0];
        }
        // Defensive copy so callers cannot mutate this selector's state.
        return CoreUtils.clone(fields);
    }

    /**
     * Sets the fields to populate on returned settings. When none are set, the service returns
     * ConfigurationSettings with a default set of properties.
     *
     * @param fields The fields to select for the query response.
     * @return The updated SettingSelector object.
     */
    public SettingSelector setFields(SettingFields... fields) {
        this.fields = fields;
        return this;
    }

    @Override
    public String toString() {
        final String fieldsText = CoreUtils.isNullOrEmpty(this.fields)
            ? "ALL_FIELDS"
            : CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper);
        return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)",
            this.keyFilter, this.labelFilter, this.acceptDatetime, fieldsText);
    }
}
You are right. https://github.com/Azure/AppConfiguration/blob/master/docs/REST/kv.md
public String getKeyFilter() { return keyFilter == null ? "" : keyFilter; }
return keyFilter == null ? "" : keyFilter;
public String getKeyFilter() { return keyFilter; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; } /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Add a message when using Objects.requireNonNull or it'll throw a NullPointer with no indication of what happened. Also, update the javadocs with `@throws`.
public SettingSelector setKeyFilter(String keyFilter) { Objects.requireNonNull(keyFilter); this.keyFilter = keyFilter; return this; }
Objects.requireNonNull(keyFilter);
public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Actually, looking at our SDK guidelines, we shouldn't be doing this verification at all and allowing the service to return a bad request. https://azure.github.io/azure-sdk/general_implementation.html#parameter-validation
public SettingSelector setKeyFilter(String keyFilter) { Objects.requireNonNull(keyFilter); this.keyFilter = keyFilter; return this; }
Objects.requireNonNull(keyFilter);
public SettingSelector setKeyFilter(String keyFilter) { this.keyFilter = keyFilter; return this; }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Why are we acquiring the lock so early in `performWorkload`?
protected void performWorkload(BaseSubscriber<FeedResponse<Document>> baseSubscriber, long i) throws InterruptedException { concurrencyControlSemaphore.acquire(); Flux<FeedResponse<Document>> obs; Random r = new Random(); FeedOptions options = new FeedOptions(); if (configuration.getOperationType() == Configuration.Operation.QueryCross) { int index = r.nextInt(1000); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c where c._rid = \"" + docsToRead.get(index).getResourceId() + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QuerySingle) { int index = r.nextInt(1000); String pk = docsToRead.get(index).getString("pk"); options.partitionKey(new PartitionKey(pk)); String sqlQuery = "Select * from c where c.pk = \"" + pk + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryParallel) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryOrderby) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregate) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select value max(c._ts) from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregateTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1 value count(c) from c order by c._ts"; obs = 
client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1000 * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryInClauseParallel) { ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(200); List<SqlParameter> parameters = new ArrayList<>(); int j = 0; for(Document doc: docsToRead) { String partitionKeyValue = doc.getId(); parameters.add(new SqlParameter("@param" + j, partitionKeyValue)); j++; } queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause(partitionKey, parameters)); SqlQuerySpec query = queryBuilder.toSqlQuerySpec(); obs = client.queryDocuments(getCollectionLink(), query, options); } else { throw new IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); } LatencySubscriber<FeedResponse> latencySubscriber = new LatencySubscriber(baseSubscriber); latencySubscriber.context = latency.time(); obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); }
concurrencyControlSemaphore.acquire();
protected void performWorkload(BaseSubscriber<FeedResponse<Document>> baseSubscriber, long i) throws InterruptedException { Flux<FeedResponse<Document>> obs; Random r = new Random(); FeedOptions options = new FeedOptions(); if (configuration.getOperationType() == Configuration.Operation.QueryCross) { int index = r.nextInt(1000); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c where c._rid = \"" + docsToRead.get(index).getResourceId() + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QuerySingle) { int index = r.nextInt(1000); String pk = docsToRead.get(index).getString("pk"); options.partitionKey(new PartitionKey(pk)); String sqlQuery = "Select * from c where c.pk = \"" + pk + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryParallel) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryOrderby) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregate) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select value max(c._ts) from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregateTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1 value count(c) from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } 
else if (configuration.getOperationType() == Configuration.Operation.QueryTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1000 * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryInClauseParallel) { ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(200); List<SqlParameter> parameters = new ArrayList<>(); int j = 0; for(Document doc: docsToRead) { String partitionKeyValue = doc.getId(); parameters.add(new SqlParameter("@param" + j, partitionKeyValue)); j++; } queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause(partitionKey, parameters)); SqlQuerySpec query = queryBuilder.toSqlQuerySpec(); obs = client.queryDocuments(getCollectionLink(), query, options); } else { throw new IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); } concurrencyControlSemaphore.acquire(); LatencySubscriber<FeedResponse> latencySubscriber = new LatencySubscriber(baseSubscriber); latencySubscriber.context = latency.time(); obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<T> baseSubscriber; LatencySubscriber(BaseSubscriber<T> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<T> baseSubscriber; LatencySubscriber(BaseSubscriber<T> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
sure, moved down. DONE
protected void performWorkload(BaseSubscriber<FeedResponse<Document>> baseSubscriber, long i) throws InterruptedException { concurrencyControlSemaphore.acquire(); Flux<FeedResponse<Document>> obs; Random r = new Random(); FeedOptions options = new FeedOptions(); if (configuration.getOperationType() == Configuration.Operation.QueryCross) { int index = r.nextInt(1000); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c where c._rid = \"" + docsToRead.get(index).getResourceId() + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QuerySingle) { int index = r.nextInt(1000); String pk = docsToRead.get(index).getString("pk"); options.partitionKey(new PartitionKey(pk)); String sqlQuery = "Select * from c where c.pk = \"" + pk + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryParallel) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryOrderby) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregate) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select value max(c._ts) from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregateTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1 value count(c) from c order by c._ts"; obs = 
client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1000 * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryInClauseParallel) { ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(200); List<SqlParameter> parameters = new ArrayList<>(); int j = 0; for(Document doc: docsToRead) { String partitionKeyValue = doc.getId(); parameters.add(new SqlParameter("@param" + j, partitionKeyValue)); j++; } queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause(partitionKey, parameters)); SqlQuerySpec query = queryBuilder.toSqlQuerySpec(); obs = client.queryDocuments(getCollectionLink(), query, options); } else { throw new IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); } LatencySubscriber<FeedResponse> latencySubscriber = new LatencySubscriber(baseSubscriber); latencySubscriber.context = latency.time(); obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); }
concurrencyControlSemaphore.acquire();
protected void performWorkload(BaseSubscriber<FeedResponse<Document>> baseSubscriber, long i) throws InterruptedException { Flux<FeedResponse<Document>> obs; Random r = new Random(); FeedOptions options = new FeedOptions(); if (configuration.getOperationType() == Configuration.Operation.QueryCross) { int index = r.nextInt(1000); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c where c._rid = \"" + docsToRead.get(index).getResourceId() + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QuerySingle) { int index = r.nextInt(1000); String pk = docsToRead.get(index).getString("pk"); options.partitionKey(new PartitionKey(pk)); String sqlQuery = "Select * from c where c.pk = \"" + pk + "\""; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryParallel) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryOrderby) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregate) { options.maxItemCount(10); options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select value max(c._ts) from c"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryAggregateTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1 value count(c) from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } 
else if (configuration.getOperationType() == Configuration.Operation.QueryTopOrderby) { options.setEnableCrossPartitionQuery(true); String sqlQuery = "Select top 1000 * from c order by c._ts"; obs = client.queryDocuments(getCollectionLink(), sqlQuery, options); } else if (configuration.getOperationType() == Configuration.Operation.QueryInClauseParallel) { ReadMyWriteWorkflow.QueryBuilder queryBuilder = new ReadMyWriteWorkflow.QueryBuilder(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(200); List<SqlParameter> parameters = new ArrayList<>(); int j = 0; for(Document doc: docsToRead) { String partitionKeyValue = doc.getId(); parameters.add(new SqlParameter("@param" + j, partitionKeyValue)); j++; } queryBuilder.whereClause(new ReadMyWriteWorkflow.QueryBuilder.WhereClause.InWhereClause(partitionKey, parameters)); SqlQuerySpec query = queryBuilder.toSqlQuerySpec(); obs = client.queryDocuments(getCollectionLink(), query, options); } else { throw new IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); } concurrencyControlSemaphore.acquire(); LatencySubscriber<FeedResponse> latencySubscriber = new LatencySubscriber(baseSubscriber); latencySubscriber.context = latency.time(); obs.subscribeOn(Schedulers.parallel()).subscribe(latencySubscriber); }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<T> baseSubscriber; LatencySubscriber(BaseSubscriber<T> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
class LatencySubscriber<T> extends BaseSubscriber<T> { Timer.Context context; BaseSubscriber<T> baseSubscriber; LatencySubscriber(BaseSubscriber<T> baseSubscriber) { this.baseSubscriber = baseSubscriber; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); } @Override protected void hookOnNext(T value) { } @Override protected void hookOnComplete() { context.stop(); baseSubscriber.onComplete(); } @Override protected void hookOnError(Throwable throwable) { context.stop(); baseSubscriber.onError(throwable); } }
Does it really start a span here? We'll start another one somewhere later — why do we need two?
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) : null; final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext; Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) :
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
will this span reuse span builder we created before?
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) : null; final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext; Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
return messages.size() == 1
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Any particular reason to use 100_000 ?
private int fluxSequentialMergePrefetch(FeedOptions options, int numberOfPartitions, int pageSize, int fluxConcurrency) { int maxBufferedItemCount = options.getMaxBufferedItemCount(); if (maxBufferedItemCount <= 0) { maxBufferedItemCount = Math.min(Configs.CPU_CNT * numberOfPartitions * pageSize, 100_000); } int fluxPrefetch = Math.max(maxBufferedItemCount / (Math.max(fluxConcurrency * pageSize, 1)), 1); return Math.min(fluxPrefetch, Queues.XS_BUFFER_SIZE); }
maxBufferedItemCount = Math.min(Configs.CPU_CNT * numberOfPartitions * pageSize, 100_000);
private int fluxSequentialMergePrefetch(FeedOptions options, int numberOfPartitions, int pageSize, int fluxConcurrency) { int maxBufferedItemCount = options.getMaxBufferedItemCount(); if (maxBufferedItemCount <= 0) { maxBufferedItemCount = Math.min(Configs.getCPUCnt() * numberOfPartitions * pageSize, 100_000); } int fluxPrefetch = Math.max(maxBufferedItemCount / (Math.max(fluxConcurrency * pageSize, 1)), 1); return Math.min(fluxPrefetch, Queues.XS_BUFFER_SIZE); }
class EmptyPagesFilterTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> { private final RequestChargeTracker tracker; private DocumentProducer<T>.DocumentProducerFeedResponse previousPage; public EmptyPagesFilterTransformer( RequestChargeTracker tracker) { if (tracker == null) { throw new IllegalArgumentException("Request Charge Tracker must not be null."); } this.tracker = tracker; this.previousPage = null; } private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); double pageCharge = page.getRequestCharge(); pageCharge += charge; headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge)); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page)); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page)); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private static Map<String, String> headerResponse( double requestCharge) { return 
Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge)); } @Override public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.filter(documentProducerFeedResponse -> { if (documentProducerFeedResponse.pageResult.getResults().isEmpty()) { tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); return false; } return true; }).map(documentProducerFeedResponse -> { double charge = tracker.getAndResetCharge(); if (charge > 0) { return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge)); } else { return new ValueHolder<>(documentProducerFeedResponse); } }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( this.previousPage, documentProducerFeedResponse); this.previousPage = documentProducerFeedResponse; return previousCurrent; }).skip(1).map(currentNext -> { DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left; DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right; String compositeContinuationToken; String backendContinuationToken = current.pageResult.getContinuationToken(); if (backendContinuationToken == null) { if (next == null) { compositeContinuationToken = null; } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, next.sourcePartitionKeyRange.toRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( backendContinuationToken, current.sourcePartitionKeyRange.toRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } 
DocumentProducer<T>.DocumentProducerFeedResponse page; page = current; page = this.addCompositeContinuationToken(page, compositeContinuationToken); return page; }).map(documentProducerFeedResponse -> { return documentProducerFeedResponse.pageResult; }).switchIfEmpty(Flux.defer(() -> { return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(), headerResponse(tracker.getAndResetCharge()))); })); } }
class EmptyPagesFilterTransformer<T extends Resource> implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> { private final RequestChargeTracker tracker; private DocumentProducer<T>.DocumentProducerFeedResponse previousPage; public EmptyPagesFilterTransformer( RequestChargeTracker tracker) { if (tracker == null) { throw new IllegalArgumentException("Request Charge Tracker must not be null."); } this.tracker = tracker; this.previousPage = null; } private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); double pageCharge = page.getRequestCharge(); pageCharge += charge; headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge)); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page)); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken( DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) { FeedResponse<T> page = documentProducerFeedResponse.pageResult; Map<String, String> headers = new HashMap<>(page.getResponseHeaders()); headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken); FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(), headers, BridgeInternal.queryMetricsFromFeedResponse(page)); documentProducerFeedResponse.pageResult = newPage; return documentProducerFeedResponse; } private static Map<String, String> headerResponse( double requestCharge) { return 
Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge)); } @Override public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) { return source.filter(documentProducerFeedResponse -> { if (documentProducerFeedResponse.pageResult.getResults().isEmpty()) { tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge()); return false; } return true; }).map(documentProducerFeedResponse -> { double charge = tracker.getAndResetCharge(); if (charge > 0) { return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge)); } else { return new ValueHolder<>(documentProducerFeedResponse); } }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> { DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v; ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>( this.previousPage, documentProducerFeedResponse); this.previousPage = documentProducerFeedResponse; return previousCurrent; }).skip(1).map(currentNext -> { DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left; DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right; String compositeContinuationToken; String backendContinuationToken = current.pageResult.getContinuationToken(); if (backendContinuationToken == null) { if (next == null) { compositeContinuationToken = null; } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null, next.sourcePartitionKeyRange.toRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } } else { CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken( backendContinuationToken, current.sourcePartitionKeyRange.toRange()); compositeContinuationToken = compositeContinuationTokenDom.toJson(); } 
DocumentProducer<T>.DocumentProducerFeedResponse page; page = current; page = this.addCompositeContinuationToken(page, compositeContinuationToken); return page; }).map(documentProducerFeedResponse -> { return documentProducerFeedResponse.pageResult; }).switchIfEmpty(Flux.defer(() -> { return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(), headerResponse(tracker.getAndResetCharge()))); })); } }
yes, the sharedContext on [this](https://github.com/Azure/azure-sdk-for-java/pull/6773/files#diff-afcf12d6b6264dbb96ae63baef89082dR402) line, should get us the span builder returned by the `ProcessKind.LINK`.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) : null; final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext; Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
return messages.size() == 1
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    // Entity path for a partition-specific sender: "{eventHubName}/Partitions/{partitionId}".
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";
    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    /**
     * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the
     * service to load balance messages is key'd by the event hub name alone.
     */
    private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>();
    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnection connection;
    private final AmqpRetryOptions retryOptions;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // When true, the connection is shared with other clients, so close() must not dispose of it.
    private final boolean isSharedConnection;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when {@link CreateBatchOptions} or {@link SendOptions} specifies a partition id, or lets the
     * Event Hubs service load balance the messages amongst available partitions otherwise.
     */
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        boolean isSharedConnection) {
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.connection = connection;
        this.retryOptions = retryOptions;
        this.tracerProvider = tracerProvider;
        this.messageSerializer = messageSerializer;
        this.isSharedConnection = isSharedConnection;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is associated
     *     with.
     *
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // Partition key and partition id are mutually exclusive routing mechanisms.
        if (!CoreUtils.isNullOrEmpty(partitionKey)
            && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey)
            && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // The link-negotiated size caps the batch; fall back to the transport maximum if unknown.
                    final int maximumLinkSize = size > 0
                        ? size
                        : MAX_MESSAGE_LENGTH_BYTES;

                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }

                    final int batchSize = batchMaxSize > 0
                        ? batchMaxSize
                        : maximumLinkSize;

                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation (the original link in this javadoc was garbled by extraction).
     * </p>
     *
     * @param event Event to send to the service.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }

        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation (the original link in this javadoc was garbled by extraction).
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return sendInternal(events, options);
    }

    /**
     * Collects the event stream into at most one {@link EventDataBatch} sized to the link and sends it.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending these events.
     *
     * @return A {@link Mono} that completes when the events are pushed to the service.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        // Same mutual-exclusion rule as createBatch: key and id cannot both be set.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0
                        ? size
                        : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: the whole stream must fit in a single batch or the collector
                    // throws LINK_PAYLOAD_SIZE_EXCEEDED.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch and completes when all are delivered; errors are logged before propagating.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // Null/empty partitionId maps to the event-hub-level (service load-balanced) entity path.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    // Random link names; "EC" prefix for event-hub-level links, "PS" for partition senders.
    private String getLinkName(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? StringUtil.getRandomString("EC")
            : StringUtil.getRandomString("PS");
    }

    // Returns the cached send link for the entity path, creating and caching one on first use.
    // NOTE(review): on a concurrent first call, two links may be created and the loser of computeIfAbsent is
    // never closed — confirm whether EventHubConnection cleans up unreferenced links.
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final AmqpSendLink openLink = openLinks.get(entityPath);

        if (openLink != null) {
            return Mono.just(openLink);
        } else {
            return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions)
                .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link));
        }
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the
     * underlying connection is also closed.
     */
    @Override
    public void close() {
        // Idempotent: only the first caller performs the teardown.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        openLinks.forEach((key, value) -> value.close());
        openLinks.clear();

        if (!isSharedConnection) {
            connection.close();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is
     * {@code null} then it'll collect as many batches as possible. Otherwise, if there are more events than can
     * fit into {@code maxNumberOfBatches}, then the collector throws a {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>,
        List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;

        // Replaced each time the current batch fills up; the collector is driven sequentially
        // (no CONCURRENT characteristic).
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }

                // The current batch is full; refuse to roll over past the caller's batch budget.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);

                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }

                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider);
                // NOTE(review): this tryAdd's result is ignored — an event larger than maxMessageSize is
                // silently dropped here. Confirm whether this should throw instead.
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch, if any, as the final element.
                EventDataBatch batch = currentBatch;
                currentBatch = null;

                if (batch != null) {
                    list.add(batch);
                }

                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
No, it will just return a span builder created with the "Azure.eventhubs.send" name. This one is only used to get the builder that is needed for linking. I wanted to keep the naming consistent with the other methods, so I reused an existing one.
/**
 * Sends the batch to the associated Event Hub. An empty batch is a no-op that completes successfully after
 * logging a warning.
 *
 * @param batch The batch to send to the service.
 *
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        // Nothing to transmit; succeed without opening a link.
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Tracks the current tracing context; only allocated (and only dereferenced) when tracing is on.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE)
        : null;
    // One LINK-kind span context shared by every event in the batch; null when tracing is disabled.
    Context sharedContext = isTracingEnabled
        ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK)
        : null;

    final List<Message> messages = batch.getEvents().stream().map(event -> {
        final Message message = messageSerializer.serialize(event);

        if (isTracingEnabled) {
            // Link each event's diagnostic context onto the shared span.
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Stamp the partition key into the AMQP message annotations so the service can route the event.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();

            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }

        return message;
    }).collect(Collectors.toList());

    return getSendLink(batch.getPartitionId())
        .flatMap(link -> {
            if (isTracingEnabled) {
                Context userSpanContext = sharedContext;
                Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath());
                // Start the SEND span enriched with the link's entity path and host name.
                parentContext.set(tracerProvider.startSpan(
                    entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND));
            }
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }).doOnEach(signal -> {
            if (isTracingEnabled) {
                // End the SEND span whether the send completed or errored.
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) :
/**
 * Sends the batch to the associated Event Hub. An empty batch is a no-op that completes successfully after
 * logging a warning.
 *
 * @param batch The batch to send to the service.
 *
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        // Nothing to transmit; succeed without opening a link.
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Tracks the current tracing context; only allocated (and only dereferenced) when tracing is on.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE)
        : null;

    Context sharedContext = null;
    List<Message> messages = new ArrayList<>();
    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // Build the shared span builder from the first event's context; every subsequent event
                // is attached to it as a span link.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        final Message message = messageSerializer.serialize(event);

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Stamp the partition key into the AMQP message annotations so the service can route the event.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();

            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        messages.add(message);
    }

    // Effectively-final copy for capture by the lambda below. It is null when tracing is disabled, but the
    // lambda only dereferences it behind the isTracingEnabled guard (the batch is non-empty here, so when
    // tracing is enabled the i == 0 branch above has assigned it).
    Context finalSharedContext = sharedContext;
    return getSendLink(batch.getPartitionId())
        .flatMap(link -> {
            if (isTracingEnabled) {
                Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath());
                // Start the SEND span enriched with the link's entity path and host name.
                parentContext.set(tracerProvider.startSpan(
                    entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND));
            }
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }).doOnEach(signal -> {
            if (isTracingEnabled) {
                // End the SEND span whether the send completed or errored.
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I see. I understand the intention to stay in the existing public API boundaries, but it seems to be very misleading. startSpan sometimes start span, sometimes just creates a builder in the context: it's completely custom behavior for each kind. It seems very hard to read and maintain. Also I believe it makes almost impossible for someone to implement tracer - they need to understand all the rules hidden in https://github.com/Azure/azure-sdk-for-java/blob/9076e2f7bb141d9be4ad9c788c5fa7c4cf12795c/sdk/core/azure-core-tracing-opentelemetry/src/main/java/com/azure/core/tracing/opentelemetry/OpenTelemetryTracer.java#L67..L88 While it's surely up to you how to proceed, I suggest to split it into a different method. Or at least leave a comment/hints that explain what's going on here.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) : null; final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext; Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) :
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
/**
 * An asynchronous producer that publishes {@link EventData} to an Event Hub. Events may be sent to a specific
 * partition (via {@link SendOptions
 * service distribute them. AMQP send links are created lazily, one per destination entity path, and cached in
 * {@link
 */
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    // Entity path for a partition-specific sender: "{eventHubName}/Partitions/{partitionId}".
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";

    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    /**
     * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the
     * service to load balance messages is keyed by the eventHubName itself (no partition suffix).
     */
    private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>();
    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    // Guards close() so disposal logic runs exactly once.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnection connection;
    private final AmqpRetryOptions retryOptions;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // When true, the connection is shared with other clients and is NOT closed by this client's close().
    private final boolean isSharedConnection;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when {@link CreateBatchOptions
     * allow the service to load balance the messages amongst available partitions.
     *
     * @param fullyQualifiedNamespace Fully qualified Event Hubs namespace, e.g.
     *     {@code {yournamespace}.servicebus.windows.net}.
     * @param eventHubName Name of the Event Hub to send events to.
     * @param connection Underlying AMQP connection used to create send links.
     * @param retryOptions Retry options applied when creating links.
     * @param tracerProvider Provider for distributed tracing spans.
     * @param messageSerializer Serializer that turns {@link EventData} into AMQP messages.
     * @param isSharedConnection {@code true} if {@code connection} is shared with other clients; when shared, it is
     *     not closed by {@link
     */
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        boolean isSharedConnection) {
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.connection = connection;
        this.retryOptions = retryOptions;
        this.tracerProvider = tracerProvider;
        this.messageSerializer = messageSerializer;
        this.isSharedConnection = isSharedConnection;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar
     * to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is associated
     *     with.
     *
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}. At most one of partition key
     *     and partition id may be set; the requested maximum size may not exceed the link's negotiated size.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // Partition key and partition id are mutually exclusive routing mechanisms.
        if (!CoreUtils.isNullOrEmpty(partitionKey)
            && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey)
            && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // A non-positive reported link size falls back to the client-wide maximum.
                    final int maximumLinkSize = size > 0
                        ? size
                        : MAX_MESSAGE_LENGTH_BYTES;

                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }

                    // An unset (non-positive) requested size defaults to the link's maximum.
                    final int batchSize = batchMaxSize > 0
                        ? batchMaxSize
                        : maximumLinkSize;
                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext,
                        tracerProvider));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation.
     * </p>
     *
     * @param event Event to send to the service.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }

        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation.
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return sendInternal(events, options);
    }

    /**
     * Collects the stream of events into batches sized to the send link and pushes them to the service.
     *
     * @param events Events to send to the service.
     * @param options Send options supplying the (mutually exclusive) partition key or partition id.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        // Same mutual-exclusion rule enforced in createBatch(CreateBatchOptions).
        if (!CoreUtils.isNullOrEmpty(partitionKey)
            && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0
                        ? size
                        : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // NOTE(review): maxNumberOfBatches is fixed at 1 here, so the collector throws
                    // LINK_PAYLOAD_SIZE_EXCEEDED if the events overflow a single batch — confirm this is the
                    // intended contract for this send overload.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch and logs (without swallowing) any error raised by the sends.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // Entity path is the Event Hub itself when no partition is targeted; otherwise the partition sub-path.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    // Random link name, prefixed "EC" for the round-robin (no partition) sender and "PS" for partition senders.
    private String getLinkName(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? StringUtil.getRandomString("EC")
            : StringUtil.getRandomString("PS");
    }

    /**
     * Returns the cached send link for the partition's entity path, creating and caching one if absent.
     *
     * NOTE(review): if two subscribers race past the {@code openLinks.get} check, both create a link but
     * {@code computeIfAbsent} keeps only one; the loser's freshly created link is never closed — confirm whether
     * that is acceptable or needs an explicit close.
     */
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final AmqpSendLink openLink = openLinks.get(entityPath);

        if (openLink != null) {
            return Mono.just(openLink);
        } else {
            return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions)
                .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link));
        }
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed. Idempotent: subsequent calls are no-ops.
     */
    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        openLinks.forEach((key, value) -> value.close());
        openLinks.clear();

        if (!isSharedConnection) {
            connection.close();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null}
     * then it'll collect as many batches as possible. Otherwise, if there are more events than can fit into
     * {@code maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
     * {@link AmqpErrorCondition
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        // Null means "no limit on the number of batches".
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;

        // Batch currently being filled; rolled over when an event no longer fits.
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                // Fast path: event fits in the batch being filled.
                if (batch.tryAdd(event)) {
                    return;
                }

                // Current batch is full; fail if rolling over would exceed the allowed batch count.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);

                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }

                // Start a new batch with the overflowing event and archive the full one.
                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                EventDataBatch batch = currentBatch;
                currentBatch = null;

                // Flush the partially-filled batch, if any.
                if (batch != null) {
                    list.add(batch);
                }

                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
Should we rather just add a new method, `getSpanBuilder(Span name)`, instead?
/**
 * Sends the batch to the associated Event Hub, serializing each event to an AMQP message and stamping the
 * partition key (when present) into each message's annotations. When tracing is enabled, a shared LINK span
 * context is created up front, every event's span context is attached to it as a span link, and a SEND span is
 * started once the send link is resolved and ended when the send signal completes.
 *
 * @param batch The batch to send to the service. An empty batch is logged and skipped (completes empty).
 *
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // Nothing to send; warn rather than error so callers aren't surprised by a failure.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Holds the SEND span context so doOnEach can end the span; null (and never touched) when tracing is off.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE)
        : null;
    // Shared LINK span context that each event's span context is linked to; null when tracing is off.
    Context sharedContext = isTracingEnabled
        ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK)
        : null;

    final List<Message> messages = batch.getEvents().stream().map(event -> {
        final Message message = messageSerializer.serialize(event);

        if (isTracingEnabled) {
            // Attach this event's span context to the shared LINK span.
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Route by partition key: stamp it into the message annotations, creating them if absent.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();

            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }

        return message;
    }).collect(Collectors.toList());

    return getSendLink(batch.getPartitionId())
        .flatMap(link -> {
            if (isTracingEnabled) {
                // Start the SEND span now that the link's entity path and host name are known.
                Context userSpanContext = sharedContext;
                Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath());
                parentContext.set(tracerProvider.startSpan(
                    entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND));
            }
            // Single-message sends use the scalar overload; otherwise send the whole list.
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }).doOnEach(signal -> {
            if (isTracingEnabled) {
                // End the SEND span on every signal (complete or error).
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
Context sharedContext = isTracingEnabled ? tracerProvider.startSpan(parentContext.get(), ProcessKind.LINK) :
/**
 * Sends the batch to the associated Event Hub, serializing each event to an AMQP message and stamping the
 * partition key (when present) into each message's annotations. When tracing is enabled, a shared span builder
 * context is obtained from the first event's context, every event's span context is attached to it as a span
 * link, and a SEND span is started once the send link is resolved and ended when the send signal completes.
 *
 * @param batch The batch to send to the service. An empty batch is logged and skipped (completes empty).
 *
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // Nothing to send; warn rather than error so callers aren't surprised by a failure.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Holds the SEND span context so doOnEach can end the span; null (and never touched) when tracing is off.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE)
        : null;

    // Built once from the first event's context; stays null when tracing is disabled. The batch is non-empty
    // here (guarded above), so when tracing is enabled the i == 0 branch always assigns it.
    Context sharedContext = null;
    List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            // Attach this event's span context to the shared span builder.
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        final Message message = messageSerializer.serialize(event);

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Route by partition key: stamp it into the message annotations, creating them if absent.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();

            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        messages.add(message);
    }

    // Effectively-final copy so the lambda below can capture the shared context.
    Context finalSharedContext = sharedContext;

    return getSendLink(batch.getPartitionId())
        .flatMap(link -> {
            if (isTracingEnabled) {
                // Start the SEND span now that the link's entity path and host name are known.
                Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath());
                parentContext.set(tracerProvider.startSpan(
                    entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND));
            }
            // Single-message sends use the scalar overload; otherwise send the whole list.
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }).doOnEach(signal -> {
            if (isTracingEnabled) {
                // End the SEND span on every signal (complete or error).
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
/**
 * An asynchronous producer that publishes {@link EventData} to an Event Hub. Events may be sent to a specific
 * partition (via a partition id), routed by partition key, or left to the service to load-balance across
 * partitions.
 *
 * <p>NOTE(review): {@code sendInternal(Flux<EventDataBatch>)} below references {@code this::send} with an
 * {@link EventDataBatch} argument, but no {@code send(EventDataBatch)} overload is visible in this excerpt —
 * confirm the overload exists in the complete source.</p>
 */
class EventHubProducerAsyncClient implements Closeable {
    // Service-imposed maximum length for a partition key.
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    // Entity path for a partition-specific sender: "{eventHubName}/Partitions/{partitionId}".
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";

    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    /**
     * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the
     * service to load balance messages is the eventHubName.
     */
    private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>();
    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    // Guards close() against running the disposal logic more than once.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnection connection;
    private final AmqpRetryOptions retryOptions;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // When the connection is shared with other clients, close() must not dispose of it.
    private final boolean isSharedConnection;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when a partition id is supplied in the batch/send options, or allow the service to load balance
     * the messages amongst available partitions.
     */
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        boolean isSharedConnection) {
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.connection = connection;
        this.retryOptions = retryOptions;
        this.tracerProvider = tracerProvider;
        this.messageSerializer = messageSerializer;
        this.isSharedConnection = isSharedConnection;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar
     * to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is associated
     *     with.
     *
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // A batch may target a partition id OR a partition key, never both.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // A non-positive reported size means the link did not advertise one; fall back to the default.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }

                    // An unset (0) requested maximum defaults to the full link size.
                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;

                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext,
                        tracerProvider));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }

        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return sendInternal(events, options);
    }

    /**
     * Collects the given events into a single {@link EventDataBatch} sized to the link and sends it.
     *
     * <p>NOTE(review): the javadoc originally attached here documented a {@code batch} parameter and belonged to a
     * {@code send(EventDataBatch)} overload that is not visible in this excerpt.</p>
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending the events.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        // A send may target a partition id OR a partition key, never both.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: the collector throws if all events cannot fit in a single batch.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch; errors are logged before being propagated downstream.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // No partition id -> the Event Hub itself (service load-balances); otherwise the partition sub-path.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    // Random link name; "EC" prefix for event-hub-level senders, "PS" for partition senders.
    private String getLinkName(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? StringUtil.getRandomString("EC")
            : StringUtil.getRandomString("PS");
    }

    // Returns the cached send link for the entity path, creating and caching one if none exists yet.
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final AmqpSendLink openLink = openLinks.get(entityPath);

        if (openLink != null) {
            return Mono.just(openLink);
        } else {
            return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions)
                .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link));
        }
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed.
     */
    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        openLinks.forEach((key, value) -> value.close());
        openLinks.clear();

        if (!isSharedConnection) {
            connection.close();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null}
     * then it'll collect as many batches as possible. Otherwise, if there are more events than can fit into
     * {@code maxNumberOfBatches}, then the collector throws a {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;

        // The batch currently being accumulated into.
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            // An unset (0) maximum falls back to the default maximum message length.
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }

                // The event did not fit; if the batch quota is already met, the events cannot all be sent.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT,
                        maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }

                // Start a new batch with the overflowing event and hand the full batch to the list.
                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                EventDataBatch batch = currentBatch;
                currentBatch = null;

                if (batch != null) {
                    list.add(batch);
                }

                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
Would it be better to throw an UnsupportedOperationException rather than return something that won't work?
/**
 * {@inheritDoc}
 *
 * <p>Shared span builders are not supported by the OpenCensus tracer. Returning {@link Context#NONE} here would
 * hand callers a placeholder they could mistake for a usable result, so this fails fast instead.</p>
 *
 * @param spanName The name of the span that would be built.
 * @param context The tracing context.
 * @return Never returns; always throws.
 * @throws UnsupportedOperationException always, since this operation is not supported in OpenCensus.
 */
public Context getSharedSpanBuilder(String spanName, Context context) {
    throw logger.logExceptionAsError(
        new UnsupportedOperationException("This operation is not supported in OpenCensus"));
}
}
/**
 * {@inheritDoc}
 *
 * <p>The OpenCensus tracer does not support shared span builders; the operation fails fast rather than returning a
 * placeholder context that would not work.</p>
 *
 * @param spanName The name of the span that would be built.
 * @param context The tracing context.
 * @return Never returns; always throws.
 * @throws UnsupportedOperationException always.
 */
public Context getSharedSpanBuilder(String spanName, Context context) {
    throw logger.logExceptionAsError(
        new UnsupportedOperationException("This operation is not supported in OpenCensus"));
}
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. 
*/ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. 
*/ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
this is an odd indent level
public void startSpanProcessKindSend() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Span.Builder spanBuilder = tracer.spanBuilder(METHOD_NAME); final Context traceContext = tracingContext.addData(ENTITY_PATH_KEY, ENTITY_PATH_VALUE) .addData(HOST_NAME_KEY, HOSTNAME_VALUE).addData(SPAN_BUILDER_KEY, spanBuilder); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, traceContext, ProcessKind.SEND); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.PRODUCER, recordEventsSpan.toSpanData().getKind()); final Map<String, AttributeValue> attributeMap = recordEventsSpan.toSpanData().getAttributes(); assertEquals(attributeMap.get(COMPONENT), AttributeValue.stringAttributeValue(COMPONENT_VALUE)); assertEquals(attributeMap.get(MESSAGE_BUS_DESTINATION), AttributeValue.stringAttributeValue(ENTITY_PATH_VALUE)); assertEquals(attributeMap.get(PEER_ENDPOINT), AttributeValue.stringAttributeValue(HOSTNAME_VALUE)); }
.addData(HOST_NAME_KEY, HOSTNAME_VALUE).addData(SPAN_BUILDER_KEY, spanBuilder);
public void startSpanProcessKindSend() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Span.Builder spanBuilder = tracer.spanBuilder(METHOD_NAME); final Context traceContext = tracingContext.addData(ENTITY_PATH_KEY, ENTITY_PATH_VALUE) .addData(HOST_NAME_KEY, HOSTNAME_VALUE).addData(SPAN_BUILDER_KEY, spanBuilder); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, traceContext, ProcessKind.SEND); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.PRODUCER, recordEventsSpan.toSpanData().getKind()); final Map<String, AttributeValue> attributeMap = recordEventsSpan.toSpanData().getAttributes(); verifySpanAttributes(attributeMap); }
class OpenTelemetryTracerTest { private static final String METHOD_NAME = "Azure.eventhubs.send"; private static final String HOSTNAME_VALUE = "testEventDataNameSpace.servicebus.windows.net"; private static final String ENTITY_PATH_VALUE = "test"; private static final String COMPONENT_VALUE = "eventhubs"; private OpenTelemetryTracer openTelemetryTracer; private Tracer tracer; private Context tracingContext; private Span parentSpan; @BeforeEach public void setUp() { System.out.println("Running: setUp"); openTelemetryTracer = new OpenTelemetryTracer(); tracer = OpenTelemetry.getTracerFactory().get("TracerSdkTest"); parentSpan = tracer.spanBuilder(PARENT_SPAN_KEY).startSpan(); tracer.withSpan(parentSpan); tracingContext = new Context(PARENT_SPAN_KEY, parentSpan); } @AfterEach public void tearDown() { System.out.println("Running: tearDown"); tracer = null; tracingContext = null; assertNull(tracer); assertNull(tracingContext); } @Test public void startSpanNullPointerException() { assertThrows(NullPointerException.class, () -> openTelemetryTracer.start("", null)); } @Test public void startSpanParentContextFlowTest() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, tracingContext); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.INTERNAL, recordEventsSpan.toSpanData().getKind()); } @Test public void startSpanTestNoUserParent() { final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, Context.NONE); assertNotNull(updatedContext.getData(PARENT_SPAN_KEY)); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); 
assertFalse(recordEventsSpan.getSpanContext().isRemote()); assertNotNull(recordEventsSpan.toSpanData().getParentSpanId()); } @Test @Test public void startSpanProcessKindMessage() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, tracingContext, ProcessKind.MESSAGE); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.INTERNAL, recordEventsSpan.toSpanData().getKind()); assertNotNull(updatedContext.getData(SPAN_CONTEXT_KEY).get()); assertNotNull(updatedContext.getData(DIAGNOSTIC_ID_KEY).get()); } @Test public void startSpanProcessKindProcess() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, tracingContext, ProcessKind.PROCESS); assertFalse(tracingContext.getData(SPAN_CONTEXT_KEY).isPresent(), "When no parent span passed in context information"); assertSpanWithExplicitParent(updatedContext, parentSpanId); assertNotNull(updatedContext.getData("scope").get()); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.SERVER, recordEventsSpan.toSpanData().getKind()); } @Test public void getSpanBuilderTest() { final Context updatedContext = openTelemetryTracer.getSharedSpanBuilder(METHOD_NAME, Context.NONE); assertTrue(updatedContext.getData(SPAN_BUILDER_KEY).isPresent()); } @Test public void startProcessSpanWithRemoteParent() { final Span testSpan = tracer.spanBuilder("child-span").startSpan(); final SpanId testSpanId = testSpan.getContext().getSpanId(); final SpanContext spanContext = SpanContext.createFromRemoteParent( testSpan.getContext().getTraceId(), testSpan.getContext().getSpanId(), testSpan.getContext().getTraceFlags(), testSpan.getContext().getTracestate()); final Context traceContext = 
tracingContext.addData(SPAN_CONTEXT_KEY, spanContext); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, traceContext, ProcessKind.PROCESS); assertNotNull(updatedContext.getData("scope").get()); assertSpanWithRemoteParent(updatedContext, testSpanId); } @Test public void startSpanOverloadNullPointerException() { assertThrows(NullPointerException.class, () -> openTelemetryTracer.start("", Context.NONE, null)); } @Test public void addLinkTest() { Span.Builder span = tracer.spanBuilder("parent-span"); Span toLinkSpan = tracer.spanBuilder("new test span").startSpan(); Context spanContext = new Context( SPAN_CONTEXT_KEY, toLinkSpan.getContext()); SpanData.Link expectedLink = SpanData.Link.create(toLinkSpan.getContext()); openTelemetryTracer.addLink(spanContext.addData(SPAN_BUILDER_KEY, span)); ReadableSpan span1 = (ReadableSpan) span.startSpan(); Link createdLink = span1.toSpanData().getLinks().get(0); Assertions.assertEquals(1, span1.toSpanData().getLinks().size()); Assertions.assertEquals(expectedLink.getContext().getTraceId(), createdLink.getContext().getTraceId()); Assertions.assertEquals(expectedLink.getContext().getSpanId(), createdLink.getContext().getSpanId()); } @Test public void endSpanNoSuccessErrorMessageTest() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String expectedStatus = "UNKNOWN"; openTelemetryTracer.end(null, null, tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); } @Test public void endSpanErrorMessageTest() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String throwableMessage = "custom error message"; final String expectedStatus = "UNKNOWN"; openTelemetryTracer.end(null, new Throwable(throwableMessage), tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); assertEquals(throwableMessage, 
recordEventsSpan.toSpanData().getStatus().getDescription()); } @Test public void endSpanTestThrowableResponseCode() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String throwableMessage = "Resource not found"; final String expectedStatus = "NOT_FOUND"; openTelemetryTracer.end(404, new Throwable(throwableMessage), tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); assertEquals(throwableMessage, recordEventsSpan.toSpanData().getStatus().getDescription()); } private static void assertSpanWithExplicitParent(Context updatedContext, SpanId parentSpanId) { assertNotNull(updatedContext.getData(PARENT_SPAN_KEY).get()); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); assertFalse(recordEventsSpan.toSpanData().getHasRemoteParent()); assertEquals(parentSpanId, recordEventsSpan.toSpanData().getParentSpanId()); } private static void assertSpanWithRemoteParent(Context updatedContext, SpanId parentSpanId) { assertNotNull(updatedContext.getData(PARENT_SPAN_KEY).get()); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); assertEquals(Span.Kind.SERVER, recordEventsSpan.toSpanData().getKind()); assertTrue(recordEventsSpan.toSpanData().getHasRemoteParent()); assertEquals(parentSpanId, recordEventsSpan.toSpanData().getParentSpanId()); } }
class OpenTelemetryTracerTest { private static final String METHOD_NAME = "EventHubs.send"; private static final String HOSTNAME_VALUE = "testEventDataNameSpace.servicebus.windows.net"; private static final String ENTITY_PATH_VALUE = "test"; private static final String COMPONENT_VALUE = "EventHubs"; private OpenTelemetryTracer openTelemetryTracer; private Tracer tracer; private Context tracingContext; private Span parentSpan; @BeforeEach public void setUp() { System.out.println("Running: setUp"); openTelemetryTracer = new OpenTelemetryTracer(); tracer = OpenTelemetry.getTracerFactory().get("TracerSdkTest"); parentSpan = tracer.spanBuilder(PARENT_SPAN_KEY).startSpan(); tracer.withSpan(parentSpan); tracingContext = new Context(PARENT_SPAN_KEY, parentSpan); } @AfterEach public void tearDown() { System.out.println("Running: tearDown"); tracer = null; tracingContext = null; assertNull(tracer); assertNull(tracingContext); } @Test public void startSpanNullPointerException() { assertThrows(NullPointerException.class, () -> openTelemetryTracer.start("", null)); } @Test public void startSpanParentContextFlowTest() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, tracingContext); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.INTERNAL, recordEventsSpan.toSpanData().getKind()); } @Test public void startSpanTestNoUserParent() { final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, Context.NONE); assertNotNull(updatedContext.getData(PARENT_SPAN_KEY)); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); assertFalse(recordEventsSpan.getSpanContext().isRemote()); 
assertNotNull(recordEventsSpan.toSpanData().getParentSpanId()); } @Test @Test public void startSpanProcessKindMessage() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, tracingContext, ProcessKind.MESSAGE); assertSpanWithExplicitParent(updatedContext, parentSpanId); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.INTERNAL, recordEventsSpan.toSpanData().getKind()); assertNotNull(updatedContext.getData(SPAN_CONTEXT_KEY).get()); assertNotNull(updatedContext.getData(DIAGNOSTIC_ID_KEY).get()); } @Test public void startSpanProcessKindProcess() { final SpanId parentSpanId = parentSpan.getContext().getSpanId(); final Context traceContext = tracingContext.addData(ENTITY_PATH_KEY, ENTITY_PATH_VALUE) .addData(HOST_NAME_KEY, HOSTNAME_VALUE); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, traceContext, ProcessKind.PROCESS); assertFalse(tracingContext.getData(SPAN_CONTEXT_KEY).isPresent(), "When no parent span passed in context information"); assertSpanWithExplicitParent(updatedContext, parentSpanId); assertNotNull(updatedContext.getData("scope").get()); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(Span.Kind.SERVER, recordEventsSpan.toSpanData().getKind()); final Map<String, AttributeValue> attributeMap = recordEventsSpan.toSpanData().getAttributes(); verifySpanAttributes(attributeMap); } @Test public void getSpanBuilderTest() { final Context updatedContext = openTelemetryTracer.getSharedSpanBuilder(METHOD_NAME, Context.NONE); assertTrue(updatedContext.getData(SPAN_BUILDER_KEY).isPresent()); } @Test public void startProcessSpanWithRemoteParent() { final Span testSpan = tracer.spanBuilder("child-span").startSpan(); final SpanId testSpanId = testSpan.getContext().getSpanId(); final SpanContext spanContext = 
SpanContext.createFromRemoteParent( testSpan.getContext().getTraceId(), testSpan.getContext().getSpanId(), testSpan.getContext().getTraceFlags(), testSpan.getContext().getTracestate()); final Context traceContext = tracingContext.addData(SPAN_CONTEXT_KEY, spanContext); final Context updatedContext = openTelemetryTracer.start(METHOD_NAME, traceContext, ProcessKind.PROCESS); assertNotNull(updatedContext.getData("scope").get()); assertSpanWithRemoteParent(updatedContext, testSpanId); } @Test public void startSpanOverloadNullPointerException() { assertThrows(NullPointerException.class, () -> openTelemetryTracer.start("", Context.NONE, null)); } @Test public void addLinkTest() { Span.Builder span = tracer.spanBuilder("parent-span"); Span toLinkSpan = tracer.spanBuilder("new test span").startSpan(); Context spanContext = new Context( SPAN_CONTEXT_KEY, toLinkSpan.getContext()); SpanData.Link expectedLink = SpanData.Link.create(toLinkSpan.getContext()); openTelemetryTracer.addLink(spanContext.addData(SPAN_BUILDER_KEY, span)); ReadableSpan span1 = (ReadableSpan) span.startSpan(); Link createdLink = span1.toSpanData().getLinks().get(0); Assertions.assertEquals(1, span1.toSpanData().getLinks().size()); Assertions.assertEquals(expectedLink.getContext().getTraceId(), createdLink.getContext().getTraceId()); Assertions.assertEquals(expectedLink.getContext().getSpanId(), createdLink.getContext().getSpanId()); } @Test public void endSpanNoSuccessErrorMessageTest() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String expectedStatus = "UNKNOWN"; openTelemetryTracer.end(null, null, tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); } @Test public void endSpanErrorMessageTest() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String throwableMessage = "custom error message"; final String expectedStatus = "UNKNOWN"; openTelemetryTracer.end(null, 
new Throwable(throwableMessage), tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); assertEquals(throwableMessage, recordEventsSpan.toSpanData().getStatus().getDescription()); } @Test public void endSpanTestThrowableResponseCode() { final ReadableSpan recordEventsSpan = (ReadableSpan) tracer.getCurrentSpan(); final String throwableMessage = "Resource not found"; final String expectedStatus = "NOT_FOUND"; openTelemetryTracer.end(404, new Throwable(throwableMessage), tracingContext); assertEquals(expectedStatus, recordEventsSpan.toSpanData().getStatus().getCanonicalCode().name()); assertEquals(throwableMessage, recordEventsSpan.toSpanData().getStatus().getDescription()); } private static void assertSpanWithExplicitParent(Context updatedContext, SpanId parentSpanId) { assertNotNull(updatedContext.getData(PARENT_SPAN_KEY).get()); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); assertFalse(recordEventsSpan.toSpanData().getHasRemoteParent()); assertEquals(parentSpanId, recordEventsSpan.toSpanData().getParentSpanId()); } private static void assertSpanWithRemoteParent(Context updatedContext, SpanId parentSpanId) { assertNotNull(updatedContext.getData(PARENT_SPAN_KEY).get()); assertTrue(updatedContext.getData(PARENT_SPAN_KEY).get() instanceof ReadableSpan); final ReadableSpan recordEventsSpan = (ReadableSpan) updatedContext.getData(PARENT_SPAN_KEY).get(); assertEquals(METHOD_NAME, recordEventsSpan.getName()); assertEquals(Span.Kind.SERVER, recordEventsSpan.toSpanData().getKind()); assertTrue(recordEventsSpan.toSpanData().getHasRemoteParent()); assertEquals(parentSpanId, recordEventsSpan.toSpanData().getParentSpanId()); } private static void verifySpanAttributes(Map<String, AttributeValue> attributeMap) { 
assertEquals(attributeMap.get(COMPONENT), AttributeValue.stringAttributeValue(COMPONENT_VALUE)); assertEquals(attributeMap.get(MESSAGE_BUS_DESTINATION), AttributeValue.stringAttributeValue(ENTITY_PATH_VALUE)); assertEquals(attributeMap.get(PEER_ENDPOINT), AttributeValue.stringAttributeValue(HOSTNAME_VALUE)); } }
this indentation is off
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes)" + " is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); }
final int maximumLinkSize = size > 0
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Context> sharedContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Boolean> isFirst = new AtomicReference<>(true); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { if (isFirst.getAndSet(false)) { parentContext.set(event.getContext()); sharedContext.set(tracerProvider.getSharedSpanBuilder(parentContext.get())); } Context sharedSpanBuilderContext = sharedContext.get(); tracerProvider.addSpanLinks(sharedSpanBuilderContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext.get(); Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Consider using a `for` loop rather than the `stream().map()` so you don't need the "isFirst" variable and the parentContext and sharedContext do not have to be atomicreferences.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Context> sharedContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Boolean> isFirst = new AtomicReference<>(true); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { if (isFirst.getAndSet(false)) { parentContext.set(event.getContext()); sharedContext.set(tracerProvider.getSharedSpanBuilder(parentContext.get())); } Context sharedSpanBuilderContext = sharedContext.get(); tracerProvider.addSpanLinks(sharedSpanBuilderContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext.get(); Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
if (!CoreUtils.isNullOrEmpty(partitionKey)) {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes)" + " is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I don't think we can avoid the `isFirst` because we don't want to send the request to `getSharedSpanBuilder` for every event, we can just do that for one event and use that shared builder for all others. Also, `parentContext ` would still have to be an AtomicReference as it is used [here](https://github.com/Azure/azure-sdk-for-java/pull/6773/files#diff-afcf12d6b6264dbb96ae63baef89082dR423) in this lambda.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Context> sharedContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Boolean> isFirst = new AtomicReference<>(true); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { if (isFirst.getAndSet(false)) { parentContext.set(event.getContext()); sharedContext.set(tracerProvider.getSharedSpanBuilder(parentContext.get())); } Context sharedSpanBuilderContext = sharedContext.get(); tracerProvider.addSpanLinks(sharedSpanBuilderContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext.get(); Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
if (!CoreUtils.isNullOrEmpty(partitionKey)) {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes)" + " is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
You'll have to throw this exception so you don't need to return `Context.NONE`.
public Context getSharedSpanBuilder(String spanName, Context context) { logger.logExceptionAsError( new UnsupportedOperationException("This operation is not supported in OpenCensus")); return Context.NONE; }
logger.logExceptionAsError(
public Context getSharedSpanBuilder(String spanName, Context context) { throw logger.logExceptionAsError( new UnsupportedOperationException("This operation is not supported in OpenCensus")); }
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. 
*/ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. 
*/ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
I'm fairly confident this works: ```java Context parentContext = null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext = event.getContext(); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); messages.add(message); } ```
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Context> sharedContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Boolean> isFirst = new AtomicReference<>(true); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { if (isFirst.getAndSet(false)) { parentContext.set(event.getContext()); sharedContext.set(tracerProvider.getSharedSpanBuilder(parentContext.get())); } Context sharedSpanBuilderContext = sharedContext.get(); tracerProvider.addSpanLinks(sharedSpanBuilderContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext.get(); Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
if (!CoreUtils.isNullOrEmpty(partitionKey)) {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes)" + " is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
We are using sharedContext and parentContext in the lambda expressions below and so they need to be `effectively final`.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Context> sharedContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; final AtomicReference<Boolean> isFirst = new AtomicReference<>(true); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (isTracingEnabled) { if (isFirst.getAndSet(false)) { parentContext.set(event.getContext()); sharedContext.set(tracerProvider.getSharedSpanBuilder(parentContext.get())); } Context sharedSpanBuilderContext = sharedContext.get(); tracerProvider.addSpanLinks(sharedSpanBuilderContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context userSpanContext = sharedContext.get(); Context entityContext = userSpanContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
if (!CoreUtils.isNullOrEmpty(partitionKey)) {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } Context finalSharedContext = sharedContext; return getSendLink(batch.getPartitionId()) .flatMap(link -> { if (isTracingEnabled) { Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); parentContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages); }).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes)" + " is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final AmqpRetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * * @return The set of information for the requested partition under the Event Hub this client is associated with. * * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? 
batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } openLinks.forEach((key, value) -> value.close()); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Should this instantiate a new `DataLakeRequestConditions`? I'm seeing other places that instantiate one instead of leaving it as `null`.
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
DataLakeRequestConditions requestConditions = null;
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
/**
 * Synchronous client for operations on a file in an Azure Storage Data Lake Gen2 file system:
 * append/flush uploads, ranged reads, delete, and rename. Blocking calls delegate to the
 * wrapped {@link DataLakeFileAsyncClient} and the underlying blob-layer {@link BlockBlobClient}.
 */
class DataLakeFileClient extends DataLakePathClient {
    private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class);

    private final DataLakeFileAsyncClient dataLakeFileAsyncClient;

    DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
        super(pathAsyncClient, blockBlobClient);
        this.dataLakeFileAsyncClient = pathAsyncClient;
    }

    // Used by renameWithResponse to re-wrap the generic path client returned by the rename
    // operation as a file client.
    private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
        super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
        this.dataLakeFileAsyncClient =
            new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
    }

    /**
     * Gets the URL of the file represented by this client on the Data Lake service.
     *
     * @return the URL.
     */
    public String getFileUrl() {
        return getPathUrl();
    }

    /**
     * Gets the path of this file, not including the name of the resource itself.
     *
     * @return The path of the file.
     */
    public String getFilePath() {
        return getObjectPath();
    }

    /**
     * Gets the name of this file, not including its full path.
     *
     * @return The name of the file.
     */
    public String getFileName() {
        return getObjectName();
    }

    /**
     * Deletes a file.
     */
    public void delete() {
        deleteWithResponse(null, null, Context.NONE).getValue();
    }

    /**
     * Deletes a file.
     *
     * @param requestConditions {@link DataLakeRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
        Context context) {
        Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Appends data to the specified resource, to later be flushed (written) by a call to flush.
     *
     * @param data The data to write to the file.
     * @param fileOffset The position where the data is to be appended.
     * @param length The exact length of the data.
     */
    public void append(InputStream data, long fileOffset, long length) {
        appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE);
    }

    /**
     * Appends data to the specified resource, to later be flushed (written) by a call to flush.
     *
     * @param data The data to write to the file.
     * @param fileOffset The position where the data is to be appended.
     * @param length The exact length of the data.
     * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will
     * calculate the MD5 of the received data and fail the request if it does not match.
     * @param leaseId By setting lease id, requests will fail if the provided lease does not match
     * the active lease on the file.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response signalling completion.
     */
    public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5,
        String leaseId, Duration timeout, Context context) {
        Objects.requireNonNull(data);
        // Chunk the stream into ByteBuffers sized for the default blob upload block size.
        Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
            BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE);
        Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(
            fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context);
        try {
            return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
        } catch (UncheckedIOException e) {
            throw logger.logExceptionAsError(e);
        }
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous. By default this method will not
     * overwrite existing data.
     *
     * @param position The length of the file after all data has been written.
     * @return Information about the created resource.
     */
    public PathInfo flush(long position) {
        return flush(position, false);
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous.
     * (Restored: this overload is referenced by {@link #flush(long)} but its body was
     * missing from this section of the file.)
     *
     * @param position The length of the file after all data has been written.
     * @param overwrite Whether or not to overwrite, should data exist on the file.
     * @return Information about the created resource.
     */
    public PathInfo flush(long position, boolean overwrite) {
        DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
        if (!overwrite) {
            // If-None-Match: * rejects the flush when the file already has data.
            requestConditions = new DataLakeRequestConditions()
                .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous.
     *
     * @param position The length of the file after all data has been written.
     * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation.
     * @param close Whether or not a file changed event raised indicates completion (true) or modification (false).
     * @param httpHeaders {@link PathHttpHeaders httpHeaders}
     * @param requestConditions {@link DataLakeRequestConditions requestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the information of the created resource.
     */
    public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
        PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position,
            retainUncommittedData, close, httpHeaders, requestConditions, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Reads the entire file into an output stream.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public void read(OutputStream stream) {
        readWithResponse(stream, null, null, null, false, null, Context.NONE);
    }

    /**
     * Reads a range of bytes from a file into an output stream.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link FileRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link DataLakeRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
        DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
        // Delegate to the blob layer, translating option/condition types both ways.
        BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
            Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
            getRangeContentMd5, timeout, context);
        return Transforms.toFileReadResponse(response);
    }

    /**
     * Moves the file to another location within the file system.
     *
     * @param destinationPath Relative path from the file system to rename the file to, excludes
     * the file system name. For example, to move a file with fileSystem = "myfilesystem",
     * path = "mydir/hello.txt" to another path in myfilesystem (ex: newdir/hi.txt) then set the
     * destinationPath = "newdir/hi.txt"
     * @return A {@link DataLakeFileClient} used to interact with the new file created.
     */
    public DataLakeFileClient rename(String destinationPath) {
        return renameWithResponse(destinationPath, null, null, null, null).getValue();
    }

    /**
     * Moves the file to another location within the file system.
     *
     * @param destinationPath Relative path from the file system to rename the file to, excludes
     * the file system name (see {@link #rename(String)}).
     * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
     * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A {@link Response} whose value is a {@link DataLakeFileClient} used to interact with
     * the file created.
     */
    public Response<DataLakeFileClient> renameWithResponse(String destinationPath,
        DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
        Duration timeout, Context context) {
        Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions,
            destinationRequestConditions, context);
        Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
        // Re-wrap the generic path client as a file client while preserving response metadata.
        return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
    }
}
/**
 * Synchronous client for operations on a file in an Azure Storage Data Lake Gen2 file system:
 * append/flush uploads, ranged reads, delete, and rename. Blocking calls delegate to the
 * wrapped {@link DataLakeFileAsyncClient} and the underlying blob-layer {@link BlockBlobClient}.
 */
class DataLakeFileClient extends DataLakePathClient {
    private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class);

    private final DataLakeFileAsyncClient dataLakeFileAsyncClient;

    DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) {
        super(pathAsyncClient, blockBlobClient);
        this.dataLakeFileAsyncClient = pathAsyncClient;
    }

    // Used by renameWithResponse to re-wrap the generic path client returned by the rename
    // operation as a file client.
    private DataLakeFileClient(DataLakePathClient dataLakePathClient) {
        super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient);
        this.dataLakeFileAsyncClient =
            new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient);
    }

    /**
     * Gets the URL of the file represented by this client on the Data Lake service.
     *
     * @return the URL.
     */
    public String getFileUrl() {
        return getPathUrl();
    }

    /**
     * Gets the path of this file, not including the name of the resource itself.
     *
     * @return The path of the file.
     */
    public String getFilePath() {
        return getObjectPath();
    }

    /**
     * Gets the name of this file, not including its full path.
     *
     * @return The name of the file.
     */
    public String getFileName() {
        return getObjectName();
    }

    /**
     * Deletes a file.
     */
    public void delete() {
        deleteWithResponse(null, null, Context.NONE).getValue();
    }

    /**
     * Deletes a file.
     *
     * @param requestConditions {@link DataLakeRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
        Context context) {
        Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Appends data to the specified resource, to later be flushed (written) by a call to flush.
     *
     * @param data The data to write to the file.
     * @param fileOffset The position where the data is to be appended.
     * @param length The exact length of the data.
     */
    public void append(InputStream data, long fileOffset, long length) {
        appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE);
    }

    /**
     * Appends data to the specified resource, to later be flushed (written) by a call to flush.
     *
     * @param data The data to write to the file.
     * @param fileOffset The position where the data is to be appended.
     * @param length The exact length of the data.
     * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will
     * calculate the MD5 of the received data and fail the request if it does not match.
     * @param leaseId By setting lease id, requests will fail if the provided lease does not match
     * the active lease on the file.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response signalling completion.
     */
    public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5,
        String leaseId, Duration timeout, Context context) {
        Objects.requireNonNull(data);
        // Chunk the stream into ByteBuffers sized for the default blob upload block size.
        Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length,
            BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE);
        Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse(
            fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context);
        try {
            return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
        } catch (UncheckedIOException e) {
            throw logger.logExceptionAsError(e);
        }
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous.
     * <p>By default this method will not overwrite existing data.</p>
     *
     * @param position The length of the file after all data has been written.
     * @return Information about the created resource.
     */
    public PathInfo flush(long position) {
        return flush(position, false);
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous.
     * (Restored: this overload is referenced by {@link #flush(long)} but its body was
     * missing from this section of the file.)
     *
     * @param position The length of the file after all data has been written.
     * @param overwrite Whether or not to overwrite, should data exist on the file.
     * @return Information about the created resource.
     */
    public PathInfo flush(long position, boolean overwrite) {
        DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
        if (!overwrite) {
            // If-None-Match: * rejects the flush when the file already has data.
            requestConditions = new DataLakeRequestConditions()
                .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue();
    }

    /**
     * Flushes (writes) data previously appended to the file through a call to append.
     * The previously uploaded data must be contiguous.
     *
     * @param position The length of the file after all data has been written.
     * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation.
     * @param close Whether or not a file changed event raised indicates completion (true) or modification (false).
     * @param httpHeaders {@link PathHttpHeaders httpHeaders}
     * @param requestConditions {@link DataLakeRequestConditions requestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the information of the created resource.
     */
    public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close,
        PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position,
            retainUncommittedData, close, httpHeaders, requestConditions, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Reads the entire file into an output stream.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public void read(OutputStream stream) {
        readWithResponse(stream, null, null, null, false, null, Context.NONE);
    }

    /**
     * Reads a range of bytes from a file into an output stream.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link FileRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link DataLakeRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
        DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
        // Delegate to the blob layer; returnOrConvertException maps blob-layer exceptions
        // to their Data Lake equivalents before they reach the caller.
        return DataLakeImplUtils.returnOrConvertException(() -> {
            BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream,
                Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options),
                Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context);
            return Transforms.toFileReadResponse(response);
        }, logger);
    }

    /**
     * Moves the file to another location within the file system.
     *
     * @param destinationPath Relative path from the file system to rename the file to, excludes
     * the file system name. For example, to move a file with fileSystem = "myfilesystem",
     * path = "mydir/hello.txt" to another path in myfilesystem (ex: newdir/hi.txt) then set the
     * destinationPath = "newdir/hi.txt"
     * @return A {@link DataLakeFileClient} used to interact with the new file created.
     */
    public DataLakeFileClient rename(String destinationPath) {
        return renameWithResponse(destinationPath, null, null, null, null).getValue();
    }

    /**
     * Moves the file to another location within the file system.
     *
     * @param destinationPath Relative path from the file system to rename the file to, excludes
     * the file system name (see {@link #rename(String)}).
     * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
     * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A {@link Response} whose value is a {@link DataLakeFileClient} used to interact with
     * the file created.
     */
    public Response<DataLakeFileClient> renameWithResponse(String destinationPath,
        DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions,
        Duration timeout, Context context) {
        Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions,
            destinationRequestConditions, context);
        Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout);
        // Re-wrap the generic path client as a file client while preserving response metadata.
        return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue()));
    }
}
Oh whoops thanks for catching that
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
DataLakeRequestConditions requestConditions = null;
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
class DataLakeFileClient extends DataLakePathClient { private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class); private final DataLakeFileAsyncClient dataLakeFileAsyncClient; DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { super(pathAsyncClient, blockBlobClient); this.dataLakeFileAsyncClient = pathAsyncClient; } private DataLakeFileClient(DataLakePathClient dataLakePathClient) { super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> */ public void delete() { deleteWithResponse(null, null, Context.NONE).getValue(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. 
*/ public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. */ public void append(InputStream data, long fileOffset, long length) { appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response signalling completion. 
*/ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5, String leaseId, Duration timeout, Context context) { Objects.requireNonNull(data); Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length, BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE); Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse( fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context); try { return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } catch (UncheckedIOException e) { throw logger.logExceptionAsError(e); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * By default this method will not overwrite existing data. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return Information about the created resource. */ public PathInfo flush(long position) { return flush(position, false); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param overwrite Whether or not to overwrite, should data exist on the file. * * @return Information about the created resource. */ /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. 
* * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing the information of the created resource. */ public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Reads the entire file into an output stream. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void read(OutputStream stream) { readWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Reads a range of bytes from a file into an output stream. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context); return Transforms.toFileReadResponse(response); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link DataLakeFileClient} used to interact with the new file created. */ public DataLakeFileClient rename(String destinationPath) { return renameWithResponse(destinationPath, null, null, null, null).getValue(); } /** * Moves the file to another location within the file system. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A {@link Response} whose {@link Response * used to interact with the file created. 
*/ public Response<DataLakeFileClient> renameWithResponse(String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions, Duration timeout, Context context) { Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions, destinationRequestConditions, context); Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout); return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue())); } }
class DataLakeFileClient extends DataLakePathClient { private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class); private final DataLakeFileAsyncClient dataLakeFileAsyncClient; DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { super(pathAsyncClient, blockBlobClient); this.dataLakeFileAsyncClient = pathAsyncClient; } private DataLakeFileClient(DataLakePathClient dataLakePathClient) { super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> */ public void delete() { deleteWithResponse(null, null, Context.NONE).getValue(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. 
*/ public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. */ public void append(InputStream data, long fileOffset, long length) { appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response signalling completion. 
*/ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5, String leaseId, Duration timeout, Context context) { Objects.requireNonNull(data); Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length, BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE); Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse( fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context); try { return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } catch (UncheckedIOException e) { throw logger.logExceptionAsError(e); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * <p>By default this method will not overwrite existing data.</p> * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return Information about the created resource. */ public PathInfo flush(long position) { return flush(position, false); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param overwrite Whether or not to overwrite, should data exist on the file. * * @return Information about the created resource. */ /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. 
* * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing the information of the created resource. */ public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Reads the entire file into an output stream. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void read(OutputStream stream) { readWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Reads a range of bytes from a file into an output stream. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { return DataLakeImplUtils.returnOrConvertException(() -> { BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context); return Transforms.toFileReadResponse(response); }, logger); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link DataLakeFileClient} used to interact with the new file created. */ public DataLakeFileClient rename(String destinationPath) { return renameWithResponse(destinationPath, null, null, null, null).getValue(); } /** * Moves the file to another location within the file system. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A {@link Response} whose {@link Response * used to interact with the file created. 
*/ public Response<DataLakeFileClient> renameWithResponse(String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions, Duration timeout, Context context) { Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions, destinationRequestConditions, context); Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout); return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue())); } }
Done
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
DataLakeRequestConditions requestConditions = null;
public PathInfo flush(long position, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions = new DataLakeRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions, null, Context.NONE).getValue(); }
class DataLakeFileClient extends DataLakePathClient { private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class); private final DataLakeFileAsyncClient dataLakeFileAsyncClient; DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { super(pathAsyncClient, blockBlobClient); this.dataLakeFileAsyncClient = pathAsyncClient; } private DataLakeFileClient(DataLakePathClient dataLakePathClient) { super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> */ public void delete() { deleteWithResponse(null, null, Context.NONE).getValue(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. 
*/ public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. */ public void append(InputStream data, long fileOffset, long length) { appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response signalling completion. 
*/ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5, String leaseId, Duration timeout, Context context) { Objects.requireNonNull(data); Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length, BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE); Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse( fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context); try { return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } catch (UncheckedIOException e) { throw logger.logExceptionAsError(e); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * By default this method will not overwrite existing data. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return Information about the created resource. */ public PathInfo flush(long position) { return flush(position, false); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param overwrite Whether or not to overwrite, should data exist on the file. * * @return Information about the created resource. */ /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. 
* * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing the information of the created resource. */ public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Reads the entire file into an output stream. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void read(OutputStream stream) { readWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Reads a range of bytes from a file into an output stream. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context); return Transforms.toFileReadResponse(response); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link DataLakeFileClient} used to interact with the new file created. */ public DataLakeFileClient rename(String destinationPath) { return renameWithResponse(destinationPath, null, null, null, null).getValue(); } /** * Moves the file to another location within the file system. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A {@link Response} whose {@link Response * used to interact with the file created. 
*/ public Response<DataLakeFileClient> renameWithResponse(String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions, Duration timeout, Context context) { Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions, destinationRequestConditions, context); Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout); return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue())); } }
class DataLakeFileClient extends DataLakePathClient { private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class); private final DataLakeFileAsyncClient dataLakeFileAsyncClient; DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { super(pathAsyncClient, blockBlobClient); this.dataLakeFileAsyncClient = pathAsyncClient; } private DataLakeFileClient(DataLakePathClient dataLakePathClient) { super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> */ public void delete() { deleteWithResponse(null, null, Context.NONE).getValue(); } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. 
*/ public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = dataLakePathAsyncClient.deleteWithResponse(null, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. */ public void append(InputStream data, long fileOffset, long length) { appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE); } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response signalling completion. 
*/ public Response<Void> appendWithResponse(InputStream data, long fileOffset, long length, byte[] contentMd5, String leaseId, Duration timeout, Context context) { Objects.requireNonNull(data); Flux<ByteBuffer> fbb = Utility.convertStreamToByteBuffer(data, length, BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE); Mono<Response<Void>> response = dataLakeFileAsyncClient.appendWithResponse( fbb.subscribeOn(Schedulers.elastic()), fileOffset, length, contentMd5, leaseId, context); try { return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } catch (UncheckedIOException e) { throw logger.logExceptionAsError(e); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * <p>By default this method will not overwrite existing data.</p> * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return Information about the created resource. */ public PathInfo flush(long position) { return flush(position, false); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param overwrite Whether or not to overwrite, should data exist on the file. * * @return Information about the created resource. */ /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. 
* * <p><strong>Code Samples>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing the information of the created resource. */ public Response<PathInfo> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<PathInfo>> response = dataLakeFileAsyncClient.flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Reads the entire file into an output stream. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void read(OutputStream stream) { readWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Reads a range of bytes from a file into an output stream. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { return DataLakeImplUtils.returnOrConvertException(() -> { BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5, timeout, context); return Transforms.toFileReadResponse(response); }, logger); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link DataLakeFileClient} used to interact with the new file created. */ public DataLakeFileClient rename(String destinationPath) { return renameWithResponse(destinationPath, null, null, null, null).getValue(); } /** * Moves the file to another location within the file system. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse * * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A {@link Response} whose {@link Response * used to interact with the file created. 
*/ public Response<DataLakeFileClient> renameWithResponse(String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions, Duration timeout, Context context) { Mono<Response<DataLakePathClient>> response = renameWithResponse(destinationPath, sourceRequestConditions, destinationRequestConditions, context); Response<DataLakePathClient> resp = StorageImplUtils.blockWithOptionalTimeout(response, timeout); return new SimpleResponse<>(resp, new DataLakeFileClient(resp.getValue())); } }
for the sake of having the same naming format as other Azure SDKs, can we use '.' as a separator everywhere? E.g. 'Appconfig.getKey' rather than 'Appconfig/getKey'
/**
 * Starts the tracing span for the current service call, additionally setting metadata attributes
 * on the span by passing additional context information.
 *
 * @param method Service method being called.
 * @param context Context information about the current service call.
 * @return The updated context containing the span context.
 */
private Context startTracingSpan(Method method, Context context) {
    // Use '.' as the service/method separator so span names match the convention used by the
    // other Azure SDKs (e.g. "AppConfig.getKey" rather than "AppConfig/getKey").
    String spanName = String.format("%s.%s", interfaceParser.getServiceName(), method.getName());
    context = TracerProxy.setSpanName(spanName, context);
    return TracerProxy.start(spanName, context);
}
/**
 * Begins a tracing span for the service call that is about to be made. The span is named
 * "ServiceName.methodName" and its context is stored in the returned {@link Context}.
 *
 * @param method the service interface method being invoked
 * @param context the per-call context to enrich with span information
 * @return the context updated with the started span
 */
private Context startTracingSpan(Method method, Context context) {
    final String spanName = interfaceParser.getServiceName() + "." + method.getName();
    final Context namedContext = TracerProxy.setSpanName(spanName, context);
    return TracerProxy.start(spanName, namedContext);
}
/**
 * Invocation handler that implements a Swagger-annotated REST interface by turning each method
 * call into an HTTP request sent through an {@link HttpPipeline}, decoding the response, and
 * adapting it to the method's declared (possibly reactive) return type.
 */
class RestProxy implements InvocationHandler {
    private final ClientLogger logger = new ClientLogger(RestProxy.class);
    private final HttpPipeline httpPipeline;
    private final SerializerAdapter serializer;
    private final SwaggerInterfaceParser interfaceParser;
    private final HttpResponseDecoder decoder;
    private final ResponseConstructorsCache responseConstructorsCache;

    /**
     * Create a RestProxy.
     *
     * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP
     * requests.
     * @param serializer the serializer that will be used to convert response bodies to POJOs.
     * @param interfaceParser the parser that contains information about the interface describing REST API methods
     * that this RestProxy "implements".
     */
    private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) {
        this.httpPipeline = httpPipeline;
        this.serializer = serializer;
        this.interfaceParser = interfaceParser;
        this.decoder = new HttpResponseDecoder(this.serializer);
        this.responseConstructorsCache = new ResponseConstructorsCache();
    }

    /**
     * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger
     * interface that this RestProxy was created to "implement".
     *
     * @param method the method to get a SwaggerMethodParser for
     * @return the SwaggerMethodParser for the provided method
     */
    private SwaggerMethodParser getMethodParser(Method method) {
        return interfaceParser.getMethodParser(method);
    }

    /**
     * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance.
     *
     * @param request the HTTP request to send
     * @param contextData the context
     * @return a {@link Mono} that emits HttpResponse asynchronously
     */
    public Mono<HttpResponse> send(HttpRequest request, Context contextData) {
        return httpPipeline.send(request, contextData);
    }

    @Override
    public Object invoke(Object proxy, final Method method, Object[] args) {
        try {
            final SwaggerMethodParser methodParser;
            final HttpRequest request;
            if (method.isAnnotationPresent(ResumeOperation.class)) {
                // Resuming a previously started operation: rebuild the request from the recorded
                // OperationDescription instead of the live method arguments.
                OperationDescription opDesc = CoreUtils.findFirstOfType(args, OperationDescription.class);
                Method resumeMethod = determineResumeMethod(method, opDesc.getMethodName());
                methodParser = getMethodParser(resumeMethod);
                request = createHttpRequest(opDesc, methodParser, args);
                final Type returnType = methodParser.getReturnType();
                return handleResumeOperation(
                    request, opDesc, methodParser, returnType, startTracingSpan(resumeMethod, Context.NONE));
            } else {
                methodParser = getMethodParser(method);
                request = createHttpRequest(methodParser, args);
                // Record the calling method in the context, then start the tracing span.
                Context context = methodParser.setContext(args).addData("caller-method", methodParser.getFullyQualifiedMethodName());
                context = startTracingSpan(method, context);
                if (request.getBody() != null) {
                    // Re-wrap the body so emitted byte counts are checked against Content-Length.
                    request.setBody(validateLength(request));
                }
                final Mono<HttpResponse> asyncResponse = send(request, context);
                Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser);
                return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.getReturnType(), context);
            }
        } catch (IOException e) {
            throw logger.logExceptionAsError(Exceptions.propagate(e));
        }
    }

    // Decorates the request body flux so it errors if the number of emitted bytes ever exceeds,
    // or on completion does not equal, the declared Content-Length header.
    private Flux<ByteBuffer> validateLength(final HttpRequest request) {
        final Flux<ByteBuffer> bbFlux = request.getBody();
        if (bbFlux == null) {
            return Flux.empty();
        }
        return Flux.defer(() -> {
            Long expectedLength = Long.valueOf(request.getHeaders().getValue("Content-Length"));
            // Single-element array used as a mutable counter captured by the lambda.
            final long[] currentTotalLength = new long[1];
            return bbFlux.doOnEach(s -> {
                if (s.isOnNext()) {
                    ByteBuffer byteBuffer = s.get();
                    int currentLength = (byteBuffer == null) ? 0 : byteBuffer.remaining();
                    currentTotalLength[0] += currentLength;
                    if (currentTotalLength[0] > expectedLength) {
                        throw logger.logExceptionAsError(new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes more than the expected %d bytes.",
                                currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength));
                    }
                } else if (s.isOnComplete()) {
                    if (expectedLength.compareTo(currentTotalLength[0]) != 0) {
                        throw logger.logExceptionAsError(new UnexpectedLengthException(
                            String.format("Request body emitted %d bytes less than the expected %d bytes.",
                                currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength));
                    }
                }
            });
        });
    }

    // Looks up, by name, the public method on the declaring interface that a resumed operation
    // should continue through. Returns null when no method of that name exists.
    private Method determineResumeMethod(Method method, String resumeMethodName) {
        for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) {
            if (potentialResumeMethod.getName().equals(resumeMethodName)) {
                return potentialResumeMethod;
            }
        }
        return null;
    }

    /**
     * Starts the tracing span for the current service call, additionally set metadata attributes on the span by
     * passing additional context information.
     * (NOTE(review): this javadoc is orphaned in this copy — the {@code startTracingSpan} method body it documents
     * does not appear at this position; verify against the upstream source.)
     * @param method Service method being called.
     * @param context Context information about the current service call.
     * @return The updated context containing the span context.
     */
    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments.
     *
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException {
        UrlBuilder urlBuilder;
        final String path = methodParser.setPath(args);
        final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path);
        if (pathUrlBuilder.getScheme() != null) {
            // The @Path value was already an absolute URL; use it as-is.
            urlBuilder = pathUrlBuilder;
        } else {
            urlBuilder = new UrlBuilder();
            final String scheme = methodParser.setScheme(args);
            urlBuilder.setScheme(scheme);
            final String host = methodParser.setHost(args);
            urlBuilder.setHost(host);
            if (path != null && !path.isEmpty() && !path.equals("/")) {
                String hostPath = urlBuilder.getPath();
                if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/") || path.contains(":
                    /* NOTE(review): the condition above appears truncated in this copy — everything
                       from "//" onward was stripped during extraction; presumably the original
                       checked path.contains("://"). Restore from the upstream source. */
                    urlBuilder.setPath(path);
                } else {
                    urlBuilder.setPath(hostPath + "/" + path);
                }
            }
        }
        for (final EncodedParameter queryParameter : methodParser.setEncodedQueryParameters(args)) {
            urlBuilder.setQueryParameter(queryParameter.getName(), queryParameter.getEncodedValue());
        }
        final URL url = urlBuilder.toUrl();
        final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args);
        // Headers from the method annotations are applied after the body-derived headers, so they win.
        for (final HttpHeader header : methodParser.setHeaders(args)) {
            request.setHeader(header.getName(), header.getValue());
        }
        return request;
    }

    /**
     * Create a HttpRequest for the provided Swagger method using the provided arguments.
     *
     * @param methodParser the Swagger method parser to use
     * @param args the arguments to use to populate the method's annotation values
     * @return a HttpRequest
     * @throws IOException thrown if the body contents cannot be serialized
     */
    private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException {
        // Resume path: URL and headers come from the recorded OperationDescription.
        final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), operationDescription.getUrl()), methodParser, args);
        for (final String headerName : operationDescription.getHeaders().keySet()) {
            request.setHeader(headerName, operationDescription.getHeaders().get(headerName));
        }
        return request;
    }

    // Sets Content-Type/Content-Length and serializes the @Body argument onto the request,
    // choosing the encoding from the declared content type and the body's Java type.
    @SuppressWarnings("unchecked")
    private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException {
        final Object bodyContentObject = methodParser.setBody(args);
        if (bodyContentObject == null) {
            request.getHeaders().put("Content-Length", "0");
        } else {
            String contentType = methodParser.getBodyContentType();
            if (contentType == null || contentType.isEmpty()) {
                // Default: raw bytes/strings go as octet-stream, everything else as JSON.
                if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) {
                    contentType = ContentType.APPLICATION_OCTET_STREAM;
                } else {
                    contentType = ContentType.APPLICATION_JSON;
                }
            }
            request.getHeaders().put("Content-Type", contentType);
            boolean isJson = false;
            // The content type may carry parameters (e.g. "; charset=..."); match on any part.
            final String[] contentTypeParts = contentType.split(";");
            for (String contentTypePart : contentTypeParts) {
                if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) {
                    isJson = true;
                    break;
                }
            }
            if (isJson) {
                final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON);
                request.setBody(bodyContentString);
            } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) {
                // Streaming body: pass the flux through untouched.
                request.setBody((Flux<ByteBuffer>) bodyContentObject);
            } else if (bodyContentObject instanceof byte[]) {
                request.setBody((byte[]) bodyContentObject);
            } else if (bodyContentObject instanceof String) {
                final String bodyContentString = (String) bodyContentObject;
                if (!bodyContentString.isEmpty()) {
                    request.setBody(bodyContentString);
                }
            } else if (bodyContentObject instanceof ByteBuffer) {
                request.setBody(Flux.just((ByteBuffer) bodyContentObject));
            } else {
                final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()));
                request.setBody(bodyContentString);
            }
        }
        return request;
    }

    private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) {
        return asyncDecodedResponse
            .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null));
    }

    // Builds the service-specific HttpResponseException for a disallowed status code, falling
    // back to a plain IOException if the exception type cannot be constructed reflectively.
    private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) {
        final int responseStatusCode = httpResponse.getStatusCode();
        String contentType = httpResponse.getHeaderValue("Content-Type");
        String bodyRepresentation;
        if ("application/octet-stream".equalsIgnoreCase(contentType)) {
            // Binary bodies are summarized by size rather than echoed into the message.
            bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)";
        } else {
            bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\"";
        }
        Exception result;
        try {
            final Constructor<? extends HttpResponseException> exceptionConstructor =
                exception.getExceptionType().getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType());
            result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation,
                httpResponse, responseDecodedContent);
        } catch (ReflectiveOperationException e) {
            String message = "Status code " + responseStatusCode + ", but an instance of "
                + exception.getExceptionType().getCanonicalName() + " cannot be created."
                + " Response body: " + bodyRepresentation;
            result = new IOException(message, e);
        }
        return result;
    }

    /**
     * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has
     * 'disallowed status code' OR (2) emits provided response if its status code is allowed.
     *
     * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser
     * or is in the int[] of additional allowed status codes.
     *
     * @param decodedResponse The HttpResponse to check.
     * @param methodParser The method parser that contains information about the service interface
     * method that initiated the HTTP request.
     * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based
     * on the context of the HTTP request.
     * @return An async-version of the provided decodedResponse.
     */
    private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) {
        final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode();
        final Mono<HttpDecodedResponse> asyncResult;
        if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) {
            Mono<String> bodyAsString = decodedResponse.getSourceResponse().getBodyAsString();
            // Three fallbacks: decoded error body, raw body text only, or no body at all —
            // each produces the appropriate exception via instantiateUnexpectedException.
            asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> {
                Mono<Object> decodedErrorBody = decodedResponse.getDecodedBody();
                return decodedErrorBody
                    .flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> {
                        Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                            decodedResponse.getSourceResponse(), responseContent, responseDecodedErrorObject);
                        return Mono.error(exception);
                    })
                    .switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                        Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                            decodedResponse.getSourceResponse(), responseContent, null);
                        return Mono.error(exception);
                    }));
            }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> {
                Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
                    decodedResponse.getSourceResponse(), "", null);
                return Mono.error(exception);
            }));
        } else {
            asyncResult = Mono.just(decodedResponse);
        }
        return asyncResult;
    }

    // Adapts the decoded response to a Response<T>-shaped return type, or hands off to
    // handleBodyReturnType for bare body types.
    private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) {
        Mono<?> asyncResult;
        if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
            Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
            if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
                // Void body: drain the network body, then build a body-less response.
                asyncResult = response.getSourceResponse().getBody().ignoreElements()
                    .then(createResponse(response, entityType, null));
            } else {
                asyncResult = handleBodyReturnType(response, methodParser, bodyType)
                    .flatMap((Function<Object, Mono<Response<?>>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject))
                    .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null)));
            }
        } else {
            asyncResult = handleBodyReturnType(response, methodParser, entityType);
        }
        return asyncResult;
    }

    // Reflectively constructs the concrete Response implementation, substituting ResponseBase /
    // PagedResponseBase for the Response / PagedResponse interfaces; constructors are cached.
    @SuppressWarnings("unchecked")
    private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) {
        Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType);
        if (cls.equals(Response.class)) {
            cls = (Class<? extends Response<?>>) (Object) ResponseBase.class;
        } else if (cls.equals(PagedResponse.class)) {
            cls = (Class<? extends Response<?>>) (Object) PagedResponseBase.class;
            if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) {
                throw logger.logExceptionAsError(new RuntimeException(
                    "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class));
            }
        }
        Constructor<? extends Response<?>> ctr = this.responseConstructorsCache.get(cls);
        if (ctr != null) {
            return this.responseConstructorsCache.invoke(ctr, response, bodyAsObject);
        } else {
            return Mono.error(new RuntimeException("Cannot find suitable constructor for class " + cls));
        }
    }

    // Converts the response body into the declared entity type: HEAD->boolean success flag,
    // byte[] (with optional Base64Url unwrapping), raw Flux<ByteBuffer>, or deserialized POJO.
    private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) {
        final int responseStatusCode = response.getSourceResponse().getStatusCode();
        final HttpMethod httpMethod = methodParser.getHttpMethod();
        final Type returnValueWireType = methodParser.getReturnValueWireType();
        final Mono<?> asyncResult;
        if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(
            entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
            // HEAD with boolean return: true for any 2xx status.
            boolean isSuccess = (responseStatusCode / 100) == 2;
            asyncResult = Mono.just(isSuccess);
        } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
            Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray();
            if (returnValueWireType == Base64Url.class) {
                responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes());
            }
            asyncResult = responseBodyBytesAsync;
        } else if (FluxUtil.isFluxByteBuffer(entityType)) {
            asyncResult = Mono.just(response.getSourceResponse().getBody());
        } else {
            asyncResult = response.getDecodedBody();
        }
        return asyncResult;
    }

    private Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) {
        return handleRestReturnType(asyncDecodedHttpResponse, methodParser, returnType, context);
    }

    // Resume is only supported by subclasses; the base implementation always throws.
    private Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) {
        throw logger.logExceptionAsError(Exceptions.propagate(new Exception(
            "The resume operation is not available in the base RestProxy class.")));
    }

    /**
     * Handle the provided asynchronous HTTP response and return the deserialized value.
     *
     * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request
     * @param methodParser the SwaggerMethodParser that the request originates from
     * @param returnType the type of value that will be returned
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return the deserialized result
     */
    private Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) {
        // Status-code validation runs first; the tracing span is ended on every terminal signal,
        // with the tracing Context threaded through the Reactor subscriber context.
        final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser)
            .doOnEach(RestProxy::endTracingSpan)
            .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context));
        final Object result;
        if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) {
            final Type monoTypeParam = TypeUtil.getTypeArgument(returnType);
            if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) {
                result = asyncExpectedResponse.then();
            } else {
                result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam));
            }
        } else if (FluxUtil.isFluxByteBuffer(returnType)) {
            result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody());
        } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
            // Synchronous void: block for completion, discard the value.
            asyncExpectedResponse.block();
            result = null;
        } else {
            // Synchronous non-void: block for the deserialized value.
            result = asyncExpectedResponse
                .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType))
                .block();
        }
        return result;
    }

    // Ends the tracing span carried in the subscriber context, reporting the HTTP status code
    // (extracted from the value or from an HttpResponseException) and any error.
    private static void endTracingSpan(Signal<HttpDecodedResponse> signal) {
        // Only onNext and onError terminate the span.
        if (signal.isOnComplete() || signal.isOnSubscribe()) {
            return;
        }
        reactor.util.context.Context context = signal.getContext();
        Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT");
        if (!tracingContext.isPresent()) {
            return;
        }
        int statusCode = 0;
        HttpDecodedResponse httpDecodedResponse;
        Throwable throwable = null;
        if (signal.hasValue()) {
            httpDecodedResponse = signal.get();
            statusCode = httpDecodedResponse.getSourceResponse().getStatusCode();
        } else if (signal.hasError()) {
            throwable = signal.getThrowable();
            if (throwable instanceof HttpResponseException) {
                HttpResponseException exception = (HttpResponseException) throwable;
                statusCode = exception.getResponse().getStatusCode();
            }
        }
        TracerProxy.end(statusCode, throwable, tracingContext.get());
    }

    /**
     * Create an instance of the default serializer.
     *
     * @return the default serializer
     */
    private static SerializerAdapter createDefaultSerializer() {
        return JacksonAdapter.createDefaultSerializerAdapter();
    }

    /**
     * Create the default HttpPipeline.
     *
     * @return the default HttpPipeline
     */
    private static HttpPipeline createDefaultPipeline() {
        return createDefaultPipeline((HttpPipelinePolicy) null);
    }

    /**
     * Create the default HttpPipeline.
     *
     * @param credentialsPolicy the credentials policy factory to use to apply authentication to the
     * pipeline
     * @return the default HttpPipeline
     */
    private static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new UserAgentPolicy());
        policies.add(new RetryPolicy());
        policies.add(new CookiePolicy());
        if (credentialsPolicy != null) {
            policies.add(credentialsPolicy);
        }
        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface) {
        return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests
     * @param <A> the type of the Swagger interface
     * @return a proxy implementation of the provided Swagger interface
     */
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) {
        return create(swaggerInterface, httpPipeline, createDefaultSerializer());
    }

    /**
     * Create a proxy implementation of the provided Swagger interface.
     *
     * @param swaggerInterface the Swagger interface to provide a proxy implementation for
     * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http
     * requests
     * @param serializer the serializer that will be used to convert POJOs to and from request and
     * response bodies
     * @param <A> the type of the Swagger interface.
     * @return a proxy implementation of the provided Swagger interface
     */
    @SuppressWarnings("unchecked")
    public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) {
        final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer);
        final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser);
        return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy);
    }
}
class RestProxy implements InvocationHandler { private final ClientLogger logger = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; private final ResponseConstructorsCache responseConstructorsCache; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP * requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods * that this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); this.responseConstructorsCache = new ResponseConstructorsCache(); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger * interface that this RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
* * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override public Object invoke(Object proxy, final Method method, Object[] args) { try { final SwaggerMethodParser methodParser; final HttpRequest request; if (method.isAnnotationPresent(ResumeOperation.class)) { OperationDescription opDesc = CoreUtils.findFirstOfType(args, OperationDescription.class); Method resumeMethod = determineResumeMethod(method, opDesc.getMethodName()); methodParser = getMethodParser(resumeMethod); request = createHttpRequest(opDesc, methodParser, args); final Type returnType = methodParser.getReturnType(); return handleResumeOperation( request, opDesc, methodParser, returnType, startTracingSpan(resumeMethod, Context.NONE)); } else { methodParser = getMethodParser(method); request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args).addData("caller-method", methodParser.getFullyQualifiedMethodName()); context = startTracingSpan(method, context); if (request.getBody() != null) { request.setBody(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleHttpResponse(request, asyncDecodedResponse, methodParser, methodParser.getReturnType(), context); } } catch (IOException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } private Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } return Flux.defer(() -> { Long expectedLength = Long.valueOf(request.getHeaders().getValue("Content-Length")); final long[] currentTotalLength = new long[1]; return bbFlux.doOnEach(s -> { if 
(s.isOnNext()) { ByteBuffer byteBuffer = s.get(); int currentLength = (byteBuffer == null) ? 0 : byteBuffer.remaining(); currentTotalLength[0] += currentLength; if (currentTotalLength[0] > expectedLength) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes more than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } else if (s.isOnComplete()) { if (expectedLength.compareTo(currentTotalLength[0]) != 0) { throw logger.logExceptionAsError(new UnexpectedLengthException( String.format("Request body emitted %d bytes less than the expected %d bytes.", currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } } }); }); } private Method determineResumeMethod(Method method, String resumeMethodName) { for (Method potentialResumeMethod : method.getDeclaringClass().getMethods()) { if (potentialResumeMethod.getName().equals(resumeMethodName)) { return potentialResumeMethod; } } return null; } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { UrlBuilder urlBuilder; final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); final String scheme = methodParser.setScheme(args); urlBuilder.setScheme(scheme); final String host = methodParser.setHost(args); urlBuilder.setHost(host); if (path != null && !path.isEmpty() && !path.equals("/")) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || hostPath.equals("/") || path.contains(": urlBuilder.setPath(path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } for (final EncodedParameter queryParameter : methodParser.setEncodedQueryParameters(args)) { urlBuilder.setQueryParameter(queryParameter.getName(), queryParameter.getEncodedValue()); } final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); for (final HttpHeader header : methodParser.setHeaders(args)) { request.setHeader(header.getName(), header.getValue()); } return request; } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
* * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(OperationDescription operationDescription, SwaggerMethodParser methodParser, Object[] args) throws IOException { final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), operationDescription.getUrl()), methodParser, args); for (final String headerName : operationDescription.getHeaders().keySet()) { request.setHeader(headerName, operationDescription.getHeaders().get(headerName)); } return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(HttpRequest request, SwaggerMethodParser methodParser, Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().put("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().put("Content-Type", contentType); boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.JSON); request.setBody(bodyContentString); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if 
(bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { final String bodyContentString = serializer.serialize(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders())); request.setBody(bodyContentString); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, null)); } private static Exception instantiateUnexpectedException(UnexpectedExceptionInformation exception, HttpResponse httpResponse, String responseContent, Object responseDecodedContent) { final int responseStatusCode = httpResponse.getStatusCode(); String contentType = httpResponse.getHeaderValue("Content-Type"); String bodyRepresentation; if ("application/octet-stream".equalsIgnoreCase(contentType)) { bodyRepresentation = "(" + httpResponse.getHeaderValue("Content-Length") + "-byte body)"; } else { bodyRepresentation = responseContent.isEmpty() ? "(empty body)" : "\"" + responseContent + "\""; } Exception result; try { final Constructor<? extends HttpResponseException> exceptionConstructor = exception.getExceptionType().getConstructor(String.class, HttpResponse.class, exception.getExceptionBodyType()); result = exceptionConstructor.newInstance("Status code " + responseStatusCode + ", " + bodyRepresentation, httpResponse, responseDecodedContent); } catch (ReflectiveOperationException e) { String message = "Status code " + responseStatusCode + ", but an instance of " + exception.getExceptionType().getCanonicalName() + " cannot be created." 
+ " Response body: " + bodyRepresentation; result = new IOException(message, e); } return result; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has * 'disallowed status code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser * or is in the int[] of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface * method that initiated the HTTP request. * @param additionalAllowedStatusCodes Additional allowed status codes that are permitted based * on the context of the HTTP request. * @return An async-version of the provided decodedResponse. */ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, int[] additionalAllowedStatusCodes) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); final Mono<HttpDecodedResponse> asyncResult; if (!methodParser.isExpectedResponseStatusCode(responseStatusCode, additionalAllowedStatusCodes)) { Mono<String> bodyAsString = decodedResponse.getSourceResponse().getBodyAsString(); asyncResult = bodyAsString.flatMap((Function<String, Mono<HttpDecodedResponse>>) responseContent -> { Mono<Object> decodedErrorBody = decodedResponse.getDecodedBody(); return decodedErrorBody .flatMap((Function<Object, Mono<HttpDecodedResponse>>) responseDecodedErrorObject -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseContent, responseDecodedErrorObject); return Mono.error(exception); }) .switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = 
instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseContent, null); return Mono.error(exception); })); }).switchIfEmpty(Mono.defer((Supplier<Mono<HttpDecodedResponse>>) () -> { Throwable exception = instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), "", null); return Mono.error(exception); })); } else { asyncResult = Mono.just(decodedResponse); } return asyncResult; } private Mono<?> handleRestResponseReturnType(HttpDecodedResponse response, SwaggerMethodParser methodParser, Type entityType) { Mono<?> asyncResult; if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { asyncResult = response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { asyncResult = handleBodyReturnType(response, methodParser, bodyType) .flatMap((Function<Object, Mono<Response<?>>>) bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { asyncResult = handleBodyReturnType(response, methodParser, entityType); } return asyncResult; } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); if (cls.equals(Response.class)) { cls = (Class<? extends Response<?>>) (Object) ResponseBase.class; } else if (cls.equals(PagedResponse.class)) { cls = (Class<? 
extends Response<?>>) (Object) PagedResponseBase.class; if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw logger.logExceptionAsError(new RuntimeException( "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class)); } } Constructor<? extends Response<?>> ctr = this.responseConstructorsCache.get(cls); if (ctr != null) { return this.responseConstructorsCache.invoke(ctr, response, bodyAsObject); } else { return Mono.error(new RuntimeException("Cannot find suitable constructor for class " + cls)); } } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync.map(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else { asyncResult = response.getDecodedBody(); } return asyncResult; } private Object handleHttpResponse(final HttpRequest httpRequest, Mono<HttpDecodedResponse> asyncDecodedHttpResponse, SwaggerMethodParser methodParser, Type returnType, Context context) { return handleRestReturnType(asyncDecodedHttpResponse, 
methodParser, returnType, context); } private Object handleResumeOperation(HttpRequest httpRequest, OperationDescription operationDescription, SwaggerMethodParser methodParser, Type returnType, Context context) { throw logger.logExceptionAsError(Exceptions.propagate(new Exception( "The resume operation is not available in the base RestProxy class."))); } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. * @return the deserialized result */ private Object handleRestReturnType(Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, Context context) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser) .doOnEach(RestProxy::endTracingSpan) .subscriberContext(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> 
handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } reactor.util.context.Context context = signal.getContext(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); if (!tracingContext.isPresent()) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create an instance of the default serializer. * * @return the default serializer */ private static SerializerAdapter createDefaultSerializer() { return JacksonAdapter.createDefaultSerializerAdapter(); } /** * Create the default HttpPipeline. * * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline() { return createDefaultPipeline((HttpPipelinePolicy) null); } /** * Create the default HttpPipeline. 
* @param credentialsPolicy the credentials policy factory to use to apply authentication to the * pipeline * @return the default HttpPipeline */ private static HttpPipeline createDefaultPipeline(HttpPipelinePolicy credentialsPolicy) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy()); policies.add(new RetryPolicy()); policies.add(new CookiePolicy()); if (credentialsPolicy != null) { policies.add(credentialsPolicy); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http * requests * @param serializer the serializer that will be used to convert POJOs to and from request and * response bodies * @param <A> the type of the Swagger interface. 
* @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
this should be logged properly. in Certificate Properties too.
void unpackId(String id) { if (id != null && id.length() > 0) { this.id = id; try { URL url = new URL(id); String[] tokens = url.getPath().split("/"); this.vaultUrl = (tokens.length >= 1 ? tokens[1] : null); this.name = (tokens.length >= 3 ? tokens[2] : null); } catch (MalformedURLException e) { e.printStackTrace(); } } }
e.printStackTrace();
void unpackId(String id) { if (id != null && id.length() > 0) { this.id = id; try { URL url = new URL(id); String[] tokens = url.getPath().split("/"); this.vaultUrl = (tokens.length >= 2 ? tokens[1] : null); this.name = (tokens.length >= 3 ? tokens[2] : null); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new IllegalArgumentException("The Azure Key Vault endpoint url is malformed.", e)); } } }
class CertificateOperation { /** * URL for the Azure KeyVault service. */ private String vaultUrl; /** * The Certificate name. */ private String name; /** * The certificate id. */ @JsonProperty(value = "id", access = JsonProperty.Access.WRITE_ONLY) private String id; /** * Name of the referenced issuer object or reserved names; for example, * 'Self' or 'Unknown'. */ private String issuerName; /** * Type of certificate to be requested from the issuer provider. */ private String certificateType; /** * Indicates if the certificates generated under this policy should be * published to certificate transparency logs. */ private boolean certificateTransparency; /** * The certificate signing request (CSR) that is being used in the * certificate operation. */ @JsonProperty(value = "csr") private byte[] csr; /** * Indicates if cancellation was requested on the certificate operation. */ @JsonProperty(value = "cancellation_requested") private Boolean cancellationRequested; /** * Status of the certificate operation. */ @JsonProperty(value = "status") private String status; /** * The status details of the certificate operation. */ @JsonProperty(value = "status_details") private String statusDetails; /** * Error encountered, if any, during the certificate operation. */ @JsonProperty(value = "error") private CertificateOperationError error; /** * Location which contains the result of the certificate operation. */ @JsonProperty(value = "target") private String target; /** * Identifier for the certificate operation. */ @JsonProperty(value = "request_id") private String requestId; /** * Get the identifier. * * @return the identifier. */ public String getId() { return this.id; } /** * Get the issuer name. * * @return the issuer name */ public String getIssuerName() { return this.issuerName; } /** * Get the certificate type. * * @return the certificateType */ public String getCertificateType() { return this.certificateType; } /** * Get the certificate transparency status. 
* * @return the certificateTransparency status. */ public boolean isCertificateTransparent() { return this.certificateTransparency; } /** * Get the csr. * * @return the csr. */ public byte[] getCsr() { return CoreUtils.clone(this.csr); } /** * Get the cancellation requested status. * * @return the cancellationRequested status. */ public Boolean getCancellationRequested() { return this.cancellationRequested; } /** * Get the status. * * @return the status */ public String getStatus() { return this.status; } /** * Get the status details. * * @return the status details */ public String getStatusDetails() { return this.statusDetails; } /** * Get the error. * * @return the error */ public CertificateOperationError getError() { return this.error; } /** * Get the target. * * @return the target */ public String getTarget() { return this.target; } /** * Get the requestId. * * @return the requestId */ public String getRequestId() { return this.requestId; } /** * Get the URL for the Azure KeyVault service. * * @return the value of the URL for the Azure KeyVault service. */ public String getVaultUrl() { return this.vaultUrl; } /** * Get the certificate name. * * @return the name of the certificate. */ public String getName() { return this.name; } @JsonProperty("issuer") private void unpackIssuerParameters(Map<String, Object> issuerParameters) { issuerName = (String) issuerParameters.get("name"); certificateType = (String) issuerParameters.get("cty"); certificateTransparency = issuerParameters.get("cert_transparency") != null ? (Boolean) issuerParameters.get("cert_transparency") : false; } @JsonProperty(value = "id") }
class CertificateOperation { private final ClientLogger logger = new ClientLogger(CertificateOperation.class); /** * URL for the Azure KeyVault service. */ private String vaultUrl; /** * The Certificate name. */ private String name; /** * The certificate id. */ @JsonProperty(value = "id", access = JsonProperty.Access.WRITE_ONLY) private String id; /** * Name of the referenced issuer object or reserved names; for example, * 'Self' or 'Unknown'. */ private String issuerName; /** * Type of certificate to be requested from the issuer provider. */ private String certificateType; /** * Indicates if the certificates generated under this policy should be * published to certificate transparency logs. */ private boolean certificateTransparency; /** * The certificate signing request (CSR) that is being used in the * certificate operation. */ @JsonProperty(value = "csr") private byte[] csr; /** * Indicates if cancellation was requested on the certificate operation. */ @JsonProperty(value = "cancellation_requested") private Boolean cancellationRequested; /** * Status of the certificate operation. */ @JsonProperty(value = "status") private String status; /** * The status details of the certificate operation. */ @JsonProperty(value = "status_details") private String statusDetails; /** * Error encountered, if any, during the certificate operation. */ @JsonProperty(value = "error") private CertificateOperationError error; /** * Location which contains the result of the certificate operation. */ @JsonProperty(value = "target") private String target; /** * Identifier for the certificate operation. */ @JsonProperty(value = "request_id") private String requestId; /** * Get the identifier. * * @return the identifier. */ public String getId() { return this.id; } /** * Get the issuer name. * * @return the issuer name */ public String getIssuerName() { return this.issuerName; } /** * Get the certificate type. 
* * @return the certificateType */ public String getCertificateType() { return this.certificateType; } /** * Get the certificate transparency status. * * @return the certificateTransparency status. */ public boolean isCertificateTransparent() { return this.certificateTransparency; } /** * Get the csr. * * @return the csr. */ public byte[] getCsr() { return CoreUtils.clone(this.csr); } /** * Get the cancellation requested status. * * @return the cancellationRequested status. */ public Boolean getCancellationRequested() { return this.cancellationRequested; } /** * Get the status. * * @return the status */ public String getStatus() { return this.status; } /** * Get the status details. * * @return the status details */ public String getStatusDetails() { return this.statusDetails; } /** * Get the error. * * @return the error */ public CertificateOperationError getError() { return this.error; } /** * Get the target. * * @return the target */ public String getTarget() { return this.target; } /** * Get the requestId. * * @return the requestId */ public String getRequestId() { return this.requestId; } /** * Get the URL for the Azure KeyVault service. * * @return the value of the URL for the Azure KeyVault service. */ public String getVaultUrl() { return this.vaultUrl; } /** * Get the certificate name. * * @return the name of the certificate. */ public String getName() { return this.name; } @JsonProperty("issuer") private void unpackIssuerParameters(Map<String, Object> issuerParameters) { issuerName = (String) issuerParameters.get("name"); certificateType = (String) issuerParameters.get("cty"); certificateTransparency = issuerParameters.get("cert_transparency") != null ? (Boolean) issuerParameters.get("cert_transparency") : false; } @JsonProperty(value = "id") }
tokens.length >= 2 ? .....
void unpackId(String id) { if (id != null && id.length() > 0) { this.id = id; try { URL url = new URL(id); String[] tokens = url.getPath().split("/"); this.vaultUrl = (tokens.length >= 1 ? tokens[1] : null); this.name = (tokens.length >= 3 ? tokens[2] : null); this.version = (tokens.length >= 4 ? tokens[3] : null); } catch (MalformedURLException e) { e.printStackTrace(); } } }
this.vaultUrl = (tokens.length >= 1 ? tokens[1] : null);
void unpackId(String id) { if (id != null && id.length() > 0) { this.id = id; try { URL url = new URL(id); String[] tokens = url.getPath().split("/"); this.vaultUrl = (tokens.length >= 2 ? tokens[1] : null); this.name = (tokens.length >= 3 ? tokens[2] : null); this.version = (tokens.length >= 4 ? tokens[3] : null); } catch (MalformedURLException e) { throw logger.logExceptionAsError(new IllegalArgumentException("The Azure Key Vault endpoint url is malformed.", e)); } } }
class CertificateProperties { /** * URL for the Azure KeyVault service. */ private String vaultUrl; /** * Determines whether the object is enabled. */ private Boolean enabled; /** * Not before date in UTC. */ private OffsetDateTime notBefore; /** * The certificate version. */ String version; /** * Expiry date in UTC. */ private OffsetDateTime expiresOn; /** * Creation time in UTC. */ private OffsetDateTime createdOn; /** * Last updated time in UTC. */ private OffsetDateTime updatedOn; /** * Reflects the deletion recovery level currently in effect for certificates in * the current vault. If it contains 'Purgeable', the certificate can be * permanently deleted by a privileged user; otherwise, only the system can * purge the certificate, at the end of the retention interval. Possible values * include: 'Purgeable', 'Recoverable+Purgeable', 'Recoverable', * 'Recoverable+ProtectedSubscription'. */ private String recoveryLevel; /** * The Certificate name. */ String name; /** * The certificate id. */ @JsonProperty(value = "id", access = JsonProperty.Access.WRITE_ONLY) private String id; /** * Application specific metadata in the form of key-value pairs. */ @JsonProperty(value = "tags") Map<String, String> tags; /** * Thumbprint of the certificate. Read Only */ @JsonProperty(value = "x5t", access = JsonProperty.Access.WRITE_ONLY) Base64Url x509Thumbprint; CertificateProperties(String name) { this.name = name; } CertificateProperties() { } /** * Get the certificate identifier. * * @return the certificate identifier */ public String getId() { return this.id; } /** * Get the notBefore UTC time. * * @return the notBefore UTC time. */ public OffsetDateTime getNotBefore() { return notBefore; } /** * Get the Certificate Expiry time in UTC. * * @return the expires UTC time. */ public OffsetDateTime getExpiresOn() { return this.expiresOn; } /** * Get the the UTC time at which certificate was created. * * @return the created UTC time. 
*/ public OffsetDateTime getCreatedOn() { return createdOn; } /** * Get the UTC time at which certificate was last updated. * * @return the last updated UTC time. */ public OffsetDateTime getUpdatedOn() { return updatedOn; } /** * Get the tags associated with the certificate. * * @return the value of the tags. */ public Map<String, String> getTags() { return this.tags; } /** * Get the URL for the Azure KeyVault service. * * @return the value of the URL for the Azure KeyVault service. */ public String getVaultUrl() { return this.vaultUrl; } /** * Set the tags to be associated with the certificate. * * @param tags The tags to set * @return the CertificateProperties object itself. */ public CertificateProperties setTags(Map<String, String> tags) { this.tags = tags; return this; } /** * Get the version of the certificate. * * @return the version of the certificate. */ public String getVersion() { return this.version; } /** * Get the certificate name. * * @return the name of the certificate. */ public String getName() { return this.name; } /** * Get the recovery level of the certificate. * @return the recoveryLevel of the certificate. */ public String getRecoveryLevel() { return recoveryLevel; } /** * Get the enabled status. * * @return the enabled status */ public Boolean isEnabled() { return this.enabled; } /** * Set the enabled status. * @param enabled The enabled status to set. * @return the CertificateProperties object itself. */ public CertificateProperties setEnabled(Boolean enabled) { this.enabled = enabled; return this; } /** * Get the X509 Thumbprint of the certificate. * @return the x509Thumbprint. 
*/ public byte[] getX509Thumbprint() { if (x509Thumbprint != null) { return this.x509Thumbprint.decodedBytes(); } return null; } @JsonProperty("attributes") @SuppressWarnings("unchecked") void unpackBaseAttributes(Map<String, Object> attributes) { this.enabled = (Boolean) attributes.get("enabled"); this.notBefore = epochToOffsetDateTime(attributes.get("nbf")); this.expiresOn = epochToOffsetDateTime(attributes.get("exp")); this.createdOn = epochToOffsetDateTime(attributes.get("created")); this.updatedOn = epochToOffsetDateTime(attributes.get("updated")); this.recoveryLevel = (String) attributes.get("recoveryLevel"); this.tags = (Map<String, String>) lazyValueSelection(attributes.get("tags"), this.tags); unpackId((String) attributes.get("id")); } private OffsetDateTime epochToOffsetDateTime(Object epochValue) { if (epochValue != null) { Instant instant = Instant.ofEpochMilli(((Number) epochValue).longValue() * 1000L); return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC); } return null; } @JsonProperty(value = "id") private Object lazyValueSelection(Object input1, Object input2) { if (input1 == null) { return input2; } return input1; } }
class CertificateProperties { private final ClientLogger logger = new ClientLogger(CertificateProperties.class); /** * URL for the Azure KeyVault service. */ private String vaultUrl; /** * Determines whether the object is enabled. */ private Boolean enabled; /** * Not before date in UTC. */ private OffsetDateTime notBefore; /** * The certificate version. */ String version; /** * Expiry date in UTC. */ private OffsetDateTime expiresOn; /** * Creation time in UTC. */ private OffsetDateTime createdOn; /** * Last updated time in UTC. */ private OffsetDateTime updatedOn; /** * Reflects the deletion recovery level currently in effect for certificates in * the current vault. If it contains 'Purgeable', the certificate can be * permanently deleted by a privileged user; otherwise, only the system can * purge the certificate, at the end of the retention interval. Possible values * include: 'Purgeable', 'Recoverable+Purgeable', 'Recoverable', * 'Recoverable+ProtectedSubscription'. */ private String recoveryLevel; /** * The Certificate name. */ String name; /** * The certificate id. */ @JsonProperty(value = "id", access = JsonProperty.Access.WRITE_ONLY) private String id; /** * Application specific metadata in the form of key-value pairs. */ @JsonProperty(value = "tags") Map<String, String> tags; /** * Thumbprint of the certificate. Read Only */ @JsonProperty(value = "x5t", access = JsonProperty.Access.WRITE_ONLY) Base64Url x509Thumbprint; CertificateProperties(String name) { this.name = name; } CertificateProperties() { } /** * Get the certificate identifier. * * @return the certificate identifier */ public String getId() { return this.id; } /** * Get the notBefore UTC time. * * @return the notBefore UTC time. */ public OffsetDateTime getNotBefore() { return notBefore; } /** * Get the Certificate Expiry time in UTC. * * @return the expires UTC time. */ public OffsetDateTime getExpiresOn() { return this.expiresOn; } /** * Get the the UTC time at which certificate was created. 
* * @return the created UTC time. */ public OffsetDateTime getCreatedOn() { return createdOn; } /** * Get the UTC time at which certificate was last updated. * * @return the last updated UTC time. */ public OffsetDateTime getUpdatedOn() { return updatedOn; } /** * Get the tags associated with the certificate. * * @return the value of the tags. */ public Map<String, String> getTags() { return this.tags; } /** * Get the URL for the Azure KeyVault service. * * @return the value of the URL for the Azure KeyVault service. */ public String getVaultUrl() { return this.vaultUrl; } /** * Set the tags to be associated with the certificate. * * @param tags The tags to set * @return the CertificateProperties object itself. */ public CertificateProperties setTags(Map<String, String> tags) { this.tags = tags; return this; } /** * Get the version of the certificate. * * @return the version of the certificate. */ public String getVersion() { return this.version; } /** * Get the certificate name. * * @return the name of the certificate. */ public String getName() { return this.name; } /** * Get the recovery level of the certificate. * @return the recoveryLevel of the certificate. */ public String getRecoveryLevel() { return recoveryLevel; } /** * Get the enabled status. * * @return the enabled status */ public Boolean isEnabled() { return this.enabled; } /** * Set the enabled status. * @param enabled The enabled status to set. * @return the CertificateProperties object itself. */ public CertificateProperties setEnabled(Boolean enabled) { this.enabled = enabled; return this; } /** * Get the X509 Thumbprint of the certificate. * @return the x509Thumbprint. 
*/ public byte[] getX509Thumbprint() { if (x509Thumbprint != null) { return this.x509Thumbprint.decodedBytes(); } return null; } @JsonProperty("attributes") @SuppressWarnings("unchecked") void unpackBaseAttributes(Map<String, Object> attributes) { this.enabled = (Boolean) attributes.get("enabled"); this.notBefore = epochToOffsetDateTime(attributes.get("nbf")); this.expiresOn = epochToOffsetDateTime(attributes.get("exp")); this.createdOn = epochToOffsetDateTime(attributes.get("created")); this.updatedOn = epochToOffsetDateTime(attributes.get("updated")); this.recoveryLevel = (String) attributes.get("recoveryLevel"); this.tags = (Map<String, String>) lazyValueSelection(attributes.get("tags"), this.tags); unpackId((String) attributes.get("id")); } private OffsetDateTime epochToOffsetDateTime(Object epochValue) { if (epochValue != null) { Instant instant = Instant.ofEpochMilli(((Number) epochValue).longValue() * 1000L); return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC); } return null; } @JsonProperty(value = "id") private Object lazyValueSelection(Object input1, Object input2) { if (input1 == null) { return input2; } return input1; } }
This is kind of heuristic but general idea is that if there are 100_000 documents of size 1KB, that means 100_000*1KB=100MB in memory which will start to put pressure on memory if we go beyond that.
private int fluxSequentialMergePrefetch(FeedOptions options, int numberOfPartitions, int pageSize, int fluxConcurrency) { int maxBufferedItemCount = options.getMaxBufferedItemCount(); if (maxBufferedItemCount <= 0) { maxBufferedItemCount = Math.min(Configs.CPU_CNT * numberOfPartitions * pageSize, 100_000); } int fluxPrefetch = Math.max(maxBufferedItemCount / (Math.max(fluxConcurrency * pageSize, 1)), 1); return Math.min(fluxPrefetch, Queues.XS_BUFFER_SIZE); }
maxBufferedItemCount = Math.min(Configs.CPU_CNT * numberOfPartitions * pageSize, 100_000);
private int fluxSequentialMergePrefetch(FeedOptions options, int numberOfPartitions, int pageSize, int fluxConcurrency) { int maxBufferedItemCount = options.getMaxBufferedItemCount(); if (maxBufferedItemCount <= 0) { maxBufferedItemCount = Math.min(Configs.getCPUCnt() * numberOfPartitions * pageSize, 100_000); } int fluxPrefetch = Math.max(maxBufferedItemCount / (Math.max(fluxConcurrency * pageSize, 1)), 1); return Math.min(fluxPrefetch, Queues.XS_BUFFER_SIZE); }
/**
 * Flux transformer that drops empty result pages while preserving their request
 * charge, and rewrites each surviving page's continuation header into a composite
 * continuation token that also records the source partition key range.
 *
 * <p>Charges from filtered-out empty pages are accumulated in the shared
 * {@link RequestChargeTracker} and folded into the next non-empty page (or into a
 * synthetic empty response if no page survives).
 *
 * <p>NOTE(review): this class is stateful ({@code previousPage}) — presumably it is
 * used on a single, non-shared subscription; confirm it is never applied to a Flux
 * with multiple subscribers.
 */
class EmptyPagesFilterTransformer<T extends Resource>
    implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {

    // Accumulates request charges of pages that were filtered out.
    private final RequestChargeTracker tracker;
    // The previously emitted page; used to pair each page with its successor so the
    // continuation token can look ahead (null before the first page).
    private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;

    /**
     * @param tracker shared charge tracker; must not be null.
     */
    public EmptyPagesFilterTransformer(
        RequestChargeTracker tracker) {
        if (tracker == null) {
            throw new IllegalArgumentException("Request Charge Tracker must not be null.");
        }
        this.tracker = tracker;
        this.previousPage = null;
    }

    /**
     * Returns the response with {@code charge} added onto its request-charge header.
     * Mutates {@code documentProducerFeedResponse.pageResult} to point at the new page.
     */
    private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
        DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) {
        FeedResponse<T> page = documentProducerFeedResponse.pageResult;
        Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
        double pageCharge = page.getRequestCharge();
        pageCharge += charge;
        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge));
        FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
            headers, BridgeInternal.queryMetricsFromFeedResponse(page));
        documentProducerFeedResponse.pageResult = newPage;
        return documentProducerFeedResponse;
    }

    /**
     * Returns the response with its continuation header replaced by the given
     * composite continuation token. Mutates {@code pageResult} like plusCharge.
     */
    private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
        DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) {
        FeedResponse<T> page = documentProducerFeedResponse.pageResult;
        Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
        headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken);
        FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
            headers, BridgeInternal.queryMetricsFromFeedResponse(page));
        documentProducerFeedResponse.pageResult = newPage;
        return documentProducerFeedResponse;
    }

    // Builds the single-entry header map carrying the accumulated request charge.
    private static Map<String, String> headerResponse(
        double requestCharge) {
        return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge));
    }

    @Override
    public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
        // Stage 1: drop empty pages, banking their charge in the tracker.
        return source.filter(documentProducerFeedResponse -> {
            if (documentProducerFeedResponse.pageResult.getResults().isEmpty()) {
                tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
                return false;
            }
            return true;
        // Stage 2: fold any banked charge into the next surviving page.
        }).map(documentProducerFeedResponse -> {
            double charge = tracker.getAndResetCharge();
            if (charge > 0) {
                return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge));
            } else {
                return new ValueHolder<>(documentProducerFeedResponse);
            }
        // Stage 3: append a null sentinel, then pair each page with its successor
        // (previous, current) so stage 4 can look one page ahead.
        }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
            DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
            ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
                this.previousPage, documentProducerFeedResponse);
            this.previousPage = documentProducerFeedResponse;
            return previousCurrent;
        // Stage 4: skip the (null, first) pair; compute the composite continuation
        // token for each page from its backend token and, when the backend token is
        // absent, from the next page's partition key range (null when no next page).
        }).skip(1).map(currentNext -> {
            DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
            DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
            String compositeContinuationToken;
            String backendContinuationToken = current.pageResult.getContinuationToken();
            if (backendContinuationToken == null) {
                // This partition is exhausted; continue from the next partition, if any.
                if (next == null) {
                    compositeContinuationToken = null;
                } else {
                    CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null,
                        next.sourcePartitionKeyRange.toRange());
                    compositeContinuationToken = compositeContinuationTokenDom.toJson();
                }
            } else {
                // More pages remain in this partition; resume from the backend token.
                CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
                    backendContinuationToken, current.sourcePartitionKeyRange.toRange());
                compositeContinuationToken = compositeContinuationTokenDom.toJson();
            }
            DocumentProducer<T>.DocumentProducerFeedResponse page;
            page = current;
            page = this.addCompositeContinuationToken(page, compositeContinuationToken);
            return page;
        // Stage 5: unwrap to the FeedResponse; if every page was empty, emit one
        // synthetic empty response carrying the total banked charge.
        }).map(documentProducerFeedResponse -> {
            return documentProducerFeedResponse.pageResult;
        }).switchIfEmpty(Flux.defer(() -> {
            return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(),
                headerResponse(tracker.getAndResetCharge())));
        }));
    }
}
/**
 * Flux transformer that drops empty result pages while preserving their request
 * charge, and rewrites each surviving page's continuation header into a composite
 * continuation token that also records the source partition key range.
 *
 * <p>Charges from filtered-out empty pages are accumulated in the shared
 * {@link RequestChargeTracker} and folded into the next non-empty page (or into a
 * synthetic empty response if no page survives).
 *
 * <p>NOTE(review): this class is stateful ({@code previousPage}) — presumably it is
 * used on a single, non-shared subscription; confirm it is never applied to a Flux
 * with multiple subscribers.
 */
class EmptyPagesFilterTransformer<T extends Resource>
    implements Function<Flux<DocumentProducer<T>.DocumentProducerFeedResponse>, Flux<FeedResponse<T>>> {

    // Accumulates request charges of pages that were filtered out.
    private final RequestChargeTracker tracker;
    // The previously emitted page; used to pair each page with its successor so the
    // continuation token can look ahead (null before the first page).
    private DocumentProducer<T>.DocumentProducerFeedResponse previousPage;

    /**
     * @param tracker shared charge tracker; must not be null.
     */
    public EmptyPagesFilterTransformer(
        RequestChargeTracker tracker) {
        if (tracker == null) {
            throw new IllegalArgumentException("Request Charge Tracker must not be null.");
        }
        this.tracker = tracker;
        this.previousPage = null;
    }

    /**
     * Returns the response with {@code charge} added onto its request-charge header.
     * Mutates {@code documentProducerFeedResponse.pageResult} to point at the new page.
     */
    private DocumentProducer<T>.DocumentProducerFeedResponse plusCharge(
        DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, double charge) {
        FeedResponse<T> page = documentProducerFeedResponse.pageResult;
        Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
        double pageCharge = page.getRequestCharge();
        pageCharge += charge;
        headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(pageCharge));
        FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
            headers, BridgeInternal.queryMetricsFromFeedResponse(page));
        documentProducerFeedResponse.pageResult = newPage;
        return documentProducerFeedResponse;
    }

    /**
     * Returns the response with its continuation header replaced by the given
     * composite continuation token. Mutates {@code pageResult} like plusCharge.
     */
    private DocumentProducer<T>.DocumentProducerFeedResponse addCompositeContinuationToken(
        DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse, String compositeContinuationToken) {
        FeedResponse<T> page = documentProducerFeedResponse.pageResult;
        Map<String, String> headers = new HashMap<>(page.getResponseHeaders());
        headers.put(HttpConstants.HttpHeaders.CONTINUATION, compositeContinuationToken);
        FeedResponse<T> newPage = BridgeInternal.createFeedResponseWithQueryMetrics(page.getResults(),
            headers, BridgeInternal.queryMetricsFromFeedResponse(page));
        documentProducerFeedResponse.pageResult = newPage;
        return documentProducerFeedResponse;
    }

    // Builds the single-entry header map carrying the accumulated request charge.
    private static Map<String, String> headerResponse(
        double requestCharge) {
        return Utils.immutableMapOf(HttpConstants.HttpHeaders.REQUEST_CHARGE, String.valueOf(requestCharge));
    }

    @Override
    public Flux<FeedResponse<T>> apply(Flux<DocumentProducer<T>.DocumentProducerFeedResponse> source) {
        // Stage 1: drop empty pages, banking their charge in the tracker.
        return source.filter(documentProducerFeedResponse -> {
            if (documentProducerFeedResponse.pageResult.getResults().isEmpty()) {
                tracker.addCharge(documentProducerFeedResponse.pageResult.getRequestCharge());
                return false;
            }
            return true;
        // Stage 2: fold any banked charge into the next surviving page.
        }).map(documentProducerFeedResponse -> {
            double charge = tracker.getAndResetCharge();
            if (charge > 0) {
                return new ValueHolder<>(plusCharge(documentProducerFeedResponse, charge));
            } else {
                return new ValueHolder<>(documentProducerFeedResponse);
            }
        // Stage 3: append a null sentinel, then pair each page with its successor
        // (previous, current) so stage 4 can look one page ahead.
        }).concatWith(Flux.just(new ValueHolder<>(null))).map(heldValue -> {
            DocumentProducer<T>.DocumentProducerFeedResponse documentProducerFeedResponse = heldValue.v;
            ImmutablePair<DocumentProducer<T>.DocumentProducerFeedResponse, DocumentProducer<T>.DocumentProducerFeedResponse> previousCurrent = new ImmutablePair<>(
                this.previousPage, documentProducerFeedResponse);
            this.previousPage = documentProducerFeedResponse;
            return previousCurrent;
        // Stage 4: skip the (null, first) pair; compute the composite continuation
        // token for each page from its backend token and, when the backend token is
        // absent, from the next page's partition key range (null when no next page).
        }).skip(1).map(currentNext -> {
            DocumentProducer<T>.DocumentProducerFeedResponse current = currentNext.left;
            DocumentProducer<T>.DocumentProducerFeedResponse next = currentNext.right;
            String compositeContinuationToken;
            String backendContinuationToken = current.pageResult.getContinuationToken();
            if (backendContinuationToken == null) {
                // This partition is exhausted; continue from the next partition, if any.
                if (next == null) {
                    compositeContinuationToken = null;
                } else {
                    CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(null,
                        next.sourcePartitionKeyRange.toRange());
                    compositeContinuationToken = compositeContinuationTokenDom.toJson();
                }
            } else {
                // More pages remain in this partition; resume from the backend token.
                CompositeContinuationToken compositeContinuationTokenDom = new CompositeContinuationToken(
                    backendContinuationToken, current.sourcePartitionKeyRange.toRange());
                compositeContinuationToken = compositeContinuationTokenDom.toJson();
            }
            DocumentProducer<T>.DocumentProducerFeedResponse page;
            page = current;
            page = this.addCompositeContinuationToken(page, compositeContinuationToken);
            return page;
        // Stage 5: unwrap to the FeedResponse; if every page was empty, emit one
        // synthetic empty response carrying the total banked charge.
        }).map(documentProducerFeedResponse -> {
            return documentProducerFeedResponse.pageResult;
        }).switchIfEmpty(Flux.defer(() -> {
            return Flux.just(BridgeInternal.createFeedResponse(Utils.immutableListOf(),
                headerResponse(tracker.getAndResetCharge())));
        }));
    }
}
This should be `return Mono.error(logger.logExceptionAsError(new Exception("Azure CLI not installed")))`. I would also like to use a more specific exception class here instead of just `Exception`.
/**
 * Asynchronously acquires a token by shelling out to the Azure CLI
 * ("az account get-access-token").
 *
 * @param request the details of the token request; its scopes are converted to a
 *                single AAD resource string appended to the CLI command.
 * @return a Mono that emits the AccessToken, or errors with an
 *         IllegalStateException when the CLI is missing / exits non-zero, or with
 *         an IOException when the process or its output cannot be read.
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    String azCommand = "az account get-access-token --output json --resource ";
    StringBuilder command = new StringBuilder();
    command.append(azCommand);
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    command.append(scopes);
    AccessToken token;
    BufferedReader reader = null;
    try {
        // Pick the shell launcher for the current OS.
        String starter;
        String switcher;
        if (System.getProperty("os.name").contains("Windows")) {
            starter = "cmd.exe";
            switcher = "/c";
        } else {
            starter = "/bin/sh";
            switcher = "-c";
        }
        ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
        // Merge stderr into stdout so CLI error text is captured by the loop below.
        builder.redirectErrorStream(true);
        Process p = builder.start();
        // NOTE(review): uses the platform default charset to decode process output —
        // confirm this matches the console encoding on all supported OSes.
        reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
        String line;
        StringBuilder output = new StringBuilder();
        while (true) {
            line = reader.readLine();
            if (line == null) {
                break;
            }
            // "cmd.exe" and "/bin/sh" report a missing 'az' differently.
            if (line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) {
                // Specific unchecked exception (logged) instead of a raw Exception.
                throw logger.logExceptionAsError(new IllegalStateException("Azure CLI not installed"));
            }
            output.append(line);
        }
        String processOutput = output.toString();
        if (p.exitValue() != 0) {
            throw logger.logExceptionAsError(new IllegalStateException(processOutput));
        }
        Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
            SerializerEncoding.JSON);
        String accessToken = objectMap.get("accessToken");
        String time = objectMap.get("expiresOn");
        // CLI emits e.g. "2019-10-31 21:59:10.123456": drop fractional seconds and
        // join date/time with 'T' so ISO_LOCAL_DATE_TIME can parse it, then convert
        // from the system zone to UTC.
        String timeToSecond = time.substring(0, time.indexOf("."));
        String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
        OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
            .atZone(ZoneId.systemDefault()).toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
        token = new AccessToken(accessToken, expiresOn);
    } catch (IOException | RuntimeException e) {
        return Mono.error(e);
    } finally {
        // The original leaked the reader; close it best-effort.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Nothing actionable on close failure.
            }
        }
    }
    return Mono.just(token);
}
throw new Exception("Azure CLI not installed");
/**
 * Asynchronously acquires a token by shelling out to the Azure CLI
 * ("az account get-access-token").
 *
 * @param request the details of the token request; its scopes are converted to a
 *                single AAD resource string appended to the CLI command.
 * @return a Mono that emits the AccessToken, or errors with a
 *         ClientAuthenticationException when the CLI is missing or exits non-zero.
 * @throws IllegalStateException when the process or its output cannot be read.
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    String azCommand = "az account get-access-token --output json --resource ";
    StringBuilder command = new StringBuilder();
    command.append(azCommand);
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    command.append(scopes);
    AccessToken token;
    try {
        // Pick the shell launcher for the current OS.
        String starter;
        String switcher;
        if (System.getProperty("os.name").contains("Windows")) {
            starter = WINDOWS_STARTER;
            switcher = WINDOWS_SWITCHER;
        } else {
            starter = LINUX_MAC_STARTER;
            switcher = LINUX_MAC_SWITCHER;
        }
        ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString());
        // Merge stderr into stdout so CLI error text is captured by the loop below.
        builder.redirectErrorStream(true);
        Process process = builder.start();
        StringBuilder output = new StringBuilder();
        // try-with-resources: the previous version declared the reader inside the try
        // block and then called reader.close() in a finally block where the variable
        // was out of scope (a compile error) and whose checked IOException was
        // unhandled. This guarantees the stream is closed on every path.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
            String line;
            while (true) {
                line = reader.readLine();
                if (line == null) {
                    break;
                }
                // "cmd.exe" and "/bin/sh" report a missing 'az' differently.
                if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE)
                    || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) {
                    throw logger.logExceptionAsError(
                        new ClientAuthenticationException("Azure CLI not installed", null));
                }
                output.append(line);
            }
        }
        String processOutput = output.toString();
        if (process.exitValue() != 0) {
            throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null));
        }
        Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class,
            SerializerEncoding.JSON);
        String accessToken = objectMap.get("accessToken");
        String time = objectMap.get("expiresOn");
        // CLI emits e.g. "2019-10-31 21:59:10.123456": drop fractional seconds and
        // join date/time with 'T' so ISO_LOCAL_DATE_TIME can parse it, then convert
        // from the system zone to UTC.
        String timeToSecond = time.substring(0, time.indexOf("."));
        String timeJoinedWithT = String.join("T", timeToSecond.split(" "));
        OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
            .atZone(ZoneId.systemDefault()).toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC);
        token = new AccessToken(accessToken, expiresOn);
    } catch (IOException e) {
        throw logger.logExceptionAsError(new IllegalStateException(e));
    } catch (RuntimeException e) {
        return Mono.error(logger.logExceptionAsError(e));
    }
    return Mono.just(token);
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
same here, return Mono.error(logger.logExceptionAsError(new Exception(..... Would like to use a better exception class instead of just Exception.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if (line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw new Exception("Azure CLI not installed"); } output.append(line); } String processOutput = output.toString(); if (p.exitValue() != 0) { throw new Exception(processOutput); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()).toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (Exception e) { return Mono.error(e); } return Mono.just(token); }
throw new Exception(processOutput);
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Is there a way to test opening a browser?
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
rt.exec("xdg-open " + url);
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Okay, so, its being caught here. better to add here, Mono.error(logger.logExceptionAsError(e)), logger can be used from azure core
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if (line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw new Exception("Azure CLI not installed"); } output.append(line); } String processOutput = output.toString(); if (p.exitValue() != 0) { throw new Exception(processOutput); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()).toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (Exception e) { return Mono.error(e); } return Mono.just(token); }
return Mono.error(e);
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
This reader must be closed after use. Also, change name to `reader` instead of using a single char.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()));
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Use `process` instead of `p`.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
Process p = builder.start();
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Use string constants here.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) {
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Use string constants.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
starter = "cmd.exe";
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
In the method `openUrl` of class `IdentityClient`, doing similar things with string, didn't use constants. Is it better to follow that way?
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
starter = "cmd.exe";
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
In the method `openUrl` of class `IdentityClient`, doing similar things with string, didn't use constants. Is it better to follow that way?
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) {
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
I would suggest creating `private static final String WINDOWS_STARTER = "cmd.exe"` and then use `starter = WINDOWS_STARTER` here.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
starter = "cmd.exe";
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
same as above.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process p = builder.start(); BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = r.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(p.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return 
Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) {
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Aure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Close should be done in `finally` block or use try-with-resources.
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith("'az' is not recognized") || line.matches("(.*)az:(.*)not found")) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } reader.close(); String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { 
return Mono.error(logger.logExceptionAsError(e)); } return Mono.just(token); }
reader.close();
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); command.append(scopes); AccessToken token = null; try { String starter; String switcher; if (System.getProperty("os.name").contains("Windows")) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); builder.redirectErrorStream(true); Process process = builder.start(); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if(line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError(new ClientAuthenticationException("Azure CLI not installed", null)); } output.append(line); } String processOutput = output.toString(); if(process.exitValue() != 0){ throw logger.logExceptionAsError(new ClientAuthenticationException(processOutput, null)); } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME).atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException e) { throw logger.logExceptionAsError(new 
IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { reader.close(); } return Mono.just(token); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. 
*/ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; URI browserUri; try { redirectUri = new URI(String.format("http: browserUri = new URI(String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes()))); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { Desktop.getDesktop().browse(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> 
authenticateWithAuthorizationCode(request, code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Probably should just print `The lease has been successfully broken.` given that they break immediately and are infinite so there is no remaining time on it.
public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue())); }
System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue()));
public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue())); }
class LeaseAsyncClientJavaDocCodeSnippets { private ShareLeaseAsyncClient client = new ShareLeaseClientBuilder() .fileAsyncClient(new ShareFileClientBuilder().resourcePath("file").buildFileAsyncClient()) .buildAsyncClient(); /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseCodeSnippet() { client.acquireLease().subscribe(response -> System.out.printf("Lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseCodeSnippet() { client.releaseLease().subscribe(response -> System.out.println("Completed release lease")); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void breakLeaseCodeSnippet() { client.breakLease().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseCodeSnippet() { client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseWithResponseCodeSnippets() { client.acquireLeaseWithResponse().subscribe(response -> System.out.printf("Lease ID is %s%n", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseWithResponseCodeSnippets() { client.releaseLeaseWithResponse().subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.getStatusCode())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseWithResponseCodeSnippets() { client.changeLeaseWithResponse("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response.getValue())); } }
class LeaseAsyncClientJavaDocCodeSnippets { private ShareLeaseAsyncClient client = new ShareLeaseClientBuilder() .fileAsyncClient(new ShareFileClientBuilder().resourcePath("file").buildFileAsyncClient()) .buildAsyncClient(); /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseCodeSnippet() { client.acquireLease().subscribe(response -> System.out.printf("Lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseCodeSnippet() { client.releaseLease().subscribe(response -> System.out.println("Completed release lease")); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void breakLeaseCodeSnippet() { client.breakLease().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseCodeSnippet() { client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseWithResponseCodeSnippets() { client.acquireLeaseWithResponse().subscribe(response -> System.out.printf("Lease ID is %s%n", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseWithResponseCodeSnippets() { client.releaseLeaseWithResponse().subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.getStatusCode())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseWithResponseCodeSnippets() { client.changeLeaseWithResponse("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response.getValue())); } }
Same as the other codesnippet
public void breakLeaseCodeSnippet() { client.breakLease().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response)); }
System.out.printf("The broken lease has %d seconds remaining on the lease", response));
public void breakLeaseCodeSnippet() { client.breakLease().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response)); }
class LeaseAsyncClientJavaDocCodeSnippets { private ShareLeaseAsyncClient client = new ShareLeaseClientBuilder() .fileAsyncClient(new ShareFileClientBuilder().resourcePath("file").buildFileAsyncClient()) .buildAsyncClient(); /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseCodeSnippet() { client.acquireLease().subscribe(response -> System.out.printf("Lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseCodeSnippet() { client.releaseLease().subscribe(response -> System.out.println("Completed release lease")); } /** * Code snippets for {@link ShareLeaseAsyncClient */ /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseCodeSnippet() { client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseWithResponseCodeSnippets() { client.acquireLeaseWithResponse().subscribe(response -> System.out.printf("Lease ID is %s%n", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseWithResponseCodeSnippets() { client.releaseLeaseWithResponse().subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.getStatusCode())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseWithResponseCodeSnippets() { client.changeLeaseWithResponse("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response.getValue())); } }
class LeaseAsyncClientJavaDocCodeSnippets { private ShareLeaseAsyncClient client = new ShareLeaseClientBuilder() .fileAsyncClient(new ShareFileClientBuilder().resourcePath("file").buildFileAsyncClient()) .buildAsyncClient(); /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseCodeSnippet() { client.acquireLease().subscribe(response -> System.out.printf("Lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseCodeSnippet() { client.releaseLease().subscribe(response -> System.out.println("Completed release lease")); } /** * Code snippets for {@link ShareLeaseAsyncClient */ /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseCodeSnippet() { client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response)); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void acquireLeaseWithResponseCodeSnippets() { client.acquireLeaseWithResponse().subscribe(response -> System.out.printf("Lease ID is %s%n", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void releaseLeaseWithResponseCodeSnippets() { client.releaseLeaseWithResponse().subscribe(response -> System.out.printf("Release lease completed with status %d%n", response.getStatusCode())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse().subscribe(response -> System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue())); } /** * Code snippets for {@link ShareLeaseAsyncClient */ public void changeLeaseWithResponseCodeSnippets() { client.changeLeaseWithResponse("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response.getValue())); } }
Probably should have a print statement similar to the `breakLease` codesnippet in this file.
public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse(timeout, new Context(key, value)); }
client.breakLeaseWithResponse(timeout, new Context(key, value));
public void breakLeaseWithResponseCodeSnippets() { client.breakLeaseWithResponse(timeout, new Context(key, value)); }
class LeaseClientJavaDocCodeSnippets { private ShareLeaseClient client = new ShareLeaseClientBuilder() .fileClient(new ShareFileClientBuilder().resourcePath("file").buildFileClient()) .buildClient(); private Duration timeout = Duration.ofSeconds(30); private String key = "key"; private String value = "value"; /** * Code snippets for {@link ShareLeaseClient */ public void acquireLease() { System.out.printf("Lease ID is %s%n", client.acquireLease()); } /** * Code snippets for {@link ShareLeaseClient */ public void releaseLease() { client.releaseLease(); System.out.println("Release lease completed"); } /** * Code snippets for {@link ShareLeaseClient */ public void breakLease() { client.breakLease(); System.out.print("The lease has been broken"); } /** * Code snippets for {@link ShareLeaseClient */ public void changeLease() { System.out.printf("Changed lease ID is %s%n", client.changeLease("proposedId")); } /** * Code snippets for {@link ShareLeaseClient */ public void acquireLeaseWithResponseCodeSnippets() { System.out.printf("Lease ID is %s%n", client .acquireLeaseWithResponse(timeout, new Context(key, value)) .getValue()); } /** * Code snippets for {@link ShareLeaseClient */ public void releaseLeaseWithResponseCodeSnippets() { System.out.printf("Release lease completed with status %d%n", client.releaseLeaseWithResponse(timeout, new Context(key, value)) .getStatusCode()); } /** * Code snippets for {@link ShareLeaseClient */ /** * Code snippets for {@link ShareLeaseClient */ public void changeLeaseWithResponseCodeSnippets() { System.out.printf("Changed lease ID is %s%n", client.changeLeaseWithResponse("proposedId", timeout, new Context(key, value)) .getValue()); } }
class LeaseClientJavaDocCodeSnippets { private ShareLeaseClient client = new ShareLeaseClientBuilder() .fileClient(new ShareFileClientBuilder().resourcePath("file").buildFileClient()) .buildClient(); private Duration timeout = Duration.ofSeconds(30); private String key = "key"; private String value = "value"; /** * Code snippets for {@link ShareLeaseClient */ public void acquireLease() { System.out.printf("Lease ID is %s%n", client.acquireLease()); } /** * Code snippets for {@link ShareLeaseClient */ public void releaseLease() { client.releaseLease(); System.out.println("Release lease completed"); } /** * Code snippets for {@link ShareLeaseClient */ public void breakLease() { client.breakLease(); System.out.print("The lease has been broken"); } /** * Code snippets for {@link ShareLeaseClient */ public void changeLease() { System.out.printf("Changed lease ID is %s%n", client.changeLease("proposedId")); } /** * Code snippets for {@link ShareLeaseClient */ public void acquireLeaseWithResponseCodeSnippets() { System.out.printf("Lease ID is %s%n", client .acquireLeaseWithResponse(timeout, new Context(key, value)) .getValue()); } /** * Code snippets for {@link ShareLeaseClient */ public void releaseLeaseWithResponseCodeSnippets() { System.out.printf("Release lease completed with status %d%n", client.releaseLeaseWithResponse(timeout, new Context(key, value)) .getStatusCode()); } /** * Code snippets for {@link ShareLeaseClient */ /** * Code snippets for {@link ShareLeaseClient */ public void changeLeaseWithResponseCodeSnippets() { System.out.printf("Changed lease ID is %s%n", client.changeLeaseWithResponse("proposedId", timeout, new Context(key, value)) .getValue()); } }
of non-localized String.toUpperCase() or String.toLowerCase() in com.azure.identity.implementation.IdentityClient.openUrl(String) [com.azure.identity.implementation.IdentityClient] At IdentityClient.java:[line 497] DM_CONVERT_CASE
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
String os = System.getProperty("os.name").toLowerCase();
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
Changed to `toLowerCase(Locale.ROOT)` so the OS-name comparison is locale-independent (resolves the FindBugs DM_CONVERT_CASE warning).
/**
 * Opens the given URL in the default browser of the current platform.
 * <p>
 * Dispatches on the {@code os.name} system property: Windows uses {@code rundll32},
 * macOS uses {@code open}, and Unix/Linux uses {@code xdg-open}. On an unrecognized
 * platform the URL is logged so the user can open it manually.
 *
 * @param url the URL to open; must come from a trusted source
 * @throws IOException if launching the platform browser command fails
 */
private void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    // FIX (FindBugs DM_CONVERT_CASE): use a fixed locale so the comparison is
    // locale-independent — in a Turkish default locale "WIN" lower-cases to a
    // dotless "wın" and the "win" check below would never match.
    String os = System.getProperty("os.name").toLowerCase(java.util.Locale.ROOT);
    // NOTE(review): url is concatenated into an exec command string; callers must
    // only pass trusted URLs (ProcessBuilder with an argument list would be safer).
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
String os = System.getProperty("os.name").toLowerCase();
/**
 * Opens the given URL in the default browser of the current platform.
 * <p>
 * Dispatches on the {@code os.name} system property: Windows uses {@code rundll32},
 * macOS uses {@code open}, and Unix/Linux uses {@code xdg-open}. On an unrecognized
 * platform the URL is logged so the user can open it manually.
 *
 * @param url the URL to open
 * @throws IOException if launching the platform browser command fails
 */
private void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    // Locale.ROOT keeps the OS-name comparison locale-independent (FindBugs DM_CONVERT_CASE).
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
    } else if (os.contains("mac")) {
        rt.exec("open " + url);
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec("xdg-open " + url);
    } else {
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
we need to look into finding a way to write and automate tests for these platform specific commands and browser end result.
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
rt.exec("xdg-open " + url);
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
JUnit offers attributes which can perform per test prerequisites, could look into that. https://keyholesoftware.com/2018/02/12/disabling-filtering-tests-junit-5/
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
rt.exec("xdg-open " + url);
private void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final PublicClientApplication publicClientApplication; private final String tenantId; private final String clientId; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, IdentityClientOptions options) { if (tenantId == null) { tenantId = "common"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.options = options; if (clientId == null) { this.publicClientApplication = null; } else { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getProxyOptions() != null) { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } this.publicClientApplication = publicClientApplicationBuilder.build(); } } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param clientSecret the client secret of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(clientSecret)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (MalformedURLException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(new FileInputStream(pfxCertificatePath), pfxCertificatePassword)) .authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (CertificateException | UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException | NoSuchProviderException | IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* * @param pemCertificatePath the path to the PEM certificate of the application * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; try { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath)); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.create(CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes))).authority(authorityUrl); if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())) .map(ar -> new AccessToken(ar.accessToken(), OffsetDateTime.ofInstant(ar.expiresOnDate().toInstant(), ZoneOffset.UTC))); } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return Mono.fromFuture(publicClientApplication.acquireToken( UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) { SilentParameters parameters; if (msalToken.getAccount() != null) { parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build(); } else { parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build(); } return Mono.defer(() -> { try { return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters)).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } }); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. * * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return publicClientApplication.acquireToken(parameters); }).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { return Mono.fromFuture(() -> publicClientApplication.acquireToken( AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build())) .map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; return AuthorizationCodeListener.create(port) .flatMap(server -> { URI redirectUri; String browserUri; try { redirectUri = new URI(String.format("http: browserUri = String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt" + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s", authorityUrl, clientId, redirectUri.toString(), UUID.randomUUID(), String.join(" ", request.getScopes())); } catch (URISyntaxException e) { return server.dispose().then(Mono.error(e)); } return server.listen() .mergeWith(Mono.<String>fromRunnable(() -> { try { openUrl(browserUri); } catch (IOException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } }).subscribeOn(Schedulers.newSingle("browser"))) .next() .flatMap(code -> authenticateWithAuthorizationCode(request, 
code, redirectUri)) .onErrorResume(t -> server.dispose().then(Mono.error(t))) .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken))); }); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } }
This should be logged as well. Use the monoError() helper method in FluxUtil instead.
/**
 * Drains the currently queued batch operations and prepares them for submission.
 * <p>
 * The current {@code batchOperationQueue} is swapped out for a fresh empty deque
 * so operations added after this call belong to the next submission. Each queued
 * operation's response pipeline is subscribed with a Reactor context carrying its
 * request URL path, its per-operation response holder, and the shared
 * {@link BlobBatchOperationInfo}; once the queue is exhausted the operation info
 * is finalized and emitted.
 *
 * @return a {@code Mono} emitting the {@link BlobBatchOperationInfo} describing
 *     the prepared batch, or an error if the batch is empty.
 */
Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
    if (batchOperationQueue.isEmpty()) {
        // NOTE(review): prefer FluxUtil.monoError(logger, ...) here so the
        // failure is logged as well as emitted — requires importing FluxUtil.
        return Mono.error(new UnsupportedOperationException("Empty batch requests aren't allowed."));
    }

    BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
    // Swap the queue atomically-enough for this client: subsequent adds go to
    // the new deque while we drain the captured one.
    Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
    batchOperationQueue = new ConcurrentLinkedDeque<>();

    return Flux.generate(sink -> {
        if (operations.isEmpty()) {
            // All operations have been dispatched; seal the batch metadata.
            operationInfo.finalizeBatchOperations();
            sink.complete();
        } else {
            BlobBatchOperation<?> batchOperation = operations.pop();

            // Subscribe each operation with the batch bookkeeping placed in the
            // subscriber context so downstream policies can find it.
            sink.next(batchOperation.getResponse()
                .subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
                    BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
                    BATCH_OPERATION_INFO, operationInfo))
                .subscribe());
        }
    }).then(Mono.just(operationInfo));
}
return Mono.error(new UnsupportedOperationException("Empty batch requests aren't allowed."));
/**
 * Drains the currently queued batch operations and prepares them for submission.
 * <p>
 * The current {@code batchOperationQueue} is swapped out for a fresh empty deque
 * so operations added after this call belong to the next submission. Each queued
 * operation's response pipeline is subscribed with a Reactor context carrying its
 * request URL path, its per-operation response holder, and the shared
 * {@link BlobBatchOperationInfo}; once the queue is exhausted the operation info
 * is finalized and emitted.
 *
 * @return a {@code Mono} emitting the {@link BlobBatchOperationInfo} describing
 *     the prepared batch, or a logged error if the batch is empty.
 */
Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() {
    if (batchOperationQueue.isEmpty()) {
        // monoError logs through this client's logger before emitting the error.
        return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed."));
    }

    BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo();
    // Swap the queue: subsequent adds go to the new deque while we drain the
    // captured one.
    Deque<BlobBatchOperation<?>> operations = batchOperationQueue;
    batchOperationQueue = new ConcurrentLinkedDeque<>();

    return Flux.generate(sink -> {
        if (operations.isEmpty()) {
            // All operations have been dispatched; seal the batch metadata.
            operationInfo.finalizeBatchOperations();
            sink.complete();
        } else {
            BlobBatchOperation<?> batchOperation = operations.pop();

            // Subscribe each operation with the batch bookkeeping placed in the
            // subscriber context so downstream policies can find it.
            sink.next(batchOperation.getResponse()
                .subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(),
                    BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(),
                    BATCH_OPERATION_INFO, operationInfo))
                .subscribe());
        }
    }).then(Mono.just(operationInfo));
}
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response"; private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private Deque<BlobBatchOperation<?>> batchOperationQueue; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(this::buildBatchOperation); this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. 
Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath)); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. 
*/ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. */ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toUrl()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get(); BlobBatchOperationResponse<?> batchOperationResponse = (BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get(); operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest()); return Mono.empty(); } }
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response"; private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private Deque<BlobBatchOperation<?>> batchOperationQueue; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(this::buildBatchOperation); this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. 
Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath)); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. 
*/ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. */ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toUrl()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get(); BlobBatchOperationResponse<?> batchOperationResponse = (BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get(); operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest()); return Mono.empty(); } }
Is this done to reuse the BlobBatch object? If so, I am not sure if it's obvious to the user that a single instance of BlobBatch can be reused after each submission. Maybe adding documentation to `submitBatchWithResponse` will make this clear. The other option is to use this instance just once per submission and a new instance is created for each batch operation. We can simplify this by providing a method to clone an existing batch object (so users don't have to redefine all params). Currently, this silently prepares for a new batch which user may be unaware of and end up creating a new BlobBatch instance anyway.
Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() { if (batchOperationQueue.isEmpty()) { return Mono.error(new UnsupportedOperationException("Empty batch requests aren't allowed.")); } BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo(); Deque<BlobBatchOperation<?>> operations = batchOperationQueue; batchOperationQueue = new ConcurrentLinkedDeque<>(); return Flux.generate(sink -> { if (operations.isEmpty()) { operationInfo.finalizeBatchOperations(); sink.complete(); } else { BlobBatchOperation<?> batchOperation = operations.pop(); sink.next(batchOperation.getResponse() .subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(), BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(), BATCH_OPERATION_INFO, operationInfo)) .subscribe()); } }).then(Mono.just(operationInfo)); }
batchOperationQueue = new ConcurrentLinkedDeque<>();
Mono<BlobBatchOperationInfo> prepareBlobBatchSubmission() { if (batchOperationQueue.isEmpty()) { return monoError(logger, new UnsupportedOperationException("Empty batch requests aren't allowed.")); } BlobBatchOperationInfo operationInfo = new BlobBatchOperationInfo(); Deque<BlobBatchOperation<?>> operations = batchOperationQueue; batchOperationQueue = new ConcurrentLinkedDeque<>(); return Flux.generate(sink -> { if (operations.isEmpty()) { operationInfo.finalizeBatchOperations(); sink.complete(); } else { BlobBatchOperation<?> batchOperation = operations.pop(); sink.next(batchOperation.getResponse() .subscriberContext(Context.of(BATCH_REQUEST_URL_PATH, batchOperation.getRequestUrlPath(), BATCH_OPERATION_RESPONSE, batchOperation.getBatchOperationResponse(), BATCH_OPERATION_INFO, operationInfo)) .subscribe()); } }).then(Mono.just(operationInfo)); }
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response"; private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private Deque<BlobBatchOperation<?>> batchOperationQueue; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(this::buildBatchOperation); this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. 
Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath)); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. 
*/ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. */ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toUrl()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get(); BlobBatchOperationResponse<?> batchOperationResponse = (BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get(); operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest()); return Mono.empty(); } }
class BlobBatch { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_REQUEST_URL_PATH = "Batch-Request-Url-Path"; private static final String BATCH_OPERATION_RESPONSE = "Batch-Operation-Response"; private static final String BATCH_OPERATION_INFO = "Batch-Operation-Info"; private static final String PATH_TEMPLATE = "%s/%s"; /* * Track the status codes expected for the batching operations here as the batch body does not get parsed in * Azure Core where this information is maintained. */ private static final int[] EXPECTED_DELETE_STATUS_CODES = {202}; private static final int[] EXPECTED_SET_TIER_STATUS_CODES = {200, 202}; private final ClientLogger logger = new ClientLogger(BlobBatch.class); private final BlobAsyncClient blobAsyncClient; private Deque<BlobBatchOperation<?>> batchOperationQueue; private BlobBatchType batchType; BlobBatch(String accountUrl, HttpPipeline pipeline) { boolean batchHeadersPolicySet = false; HttpPipelineBuilder batchPipelineBuilder = new HttpPipelineBuilder(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy policy = pipeline.getPolicy(i); if (policy instanceof StorageSharedKeyCredentialPolicy) { batchHeadersPolicySet = true; batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(policy); } if (!batchHeadersPolicySet) { batchPipelineBuilder.policies(this::cleanseHeaders, this::setRequestUrl); } batchPipelineBuilder.policies(this::buildBatchOperation); this.blobAsyncClient = new BlobClientBuilder() .endpoint(accountUrl) .blobName("") .pipeline(batchPipelineBuilder.build()) .buildAsyncClient(); this.batchOperationQueue = new ConcurrentLinkedDeque<>(); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. 
* @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String containerName, String blobName, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), deleteOptions, blobRequestConditions); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> deleteBlob(String blobUrl) { return deleteBlobHelper(getUrlPath(blobUrl), null, null); } /** * Adds a delete blob operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. * @param blobRequestConditions Additional access conditions that must be met to allow this operation. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> deleteBlob(String blobUrl, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { return deleteBlobHelper(getUrlPath(blobUrl), deleteOptions, blobRequestConditions); } private Response<Void> deleteBlobHelper(String urlPath, DeleteSnapshotsOptionType deleteOptions, BlobRequestConditions blobRequestConditions) { setBatchType(BlobBatchType.DELETE); return createBatchOperation(blobAsyncClient.deleteWithResponse(deleteOptions, blobRequestConditions), urlPath, EXPECTED_DELETE_STATUS_CODES); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. 
*/ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param containerName The container of the blob. * @param blobName The name of the blob. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String containerName, String blobName, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(String.format(PATH_TEMPLATE, containerName, Utility.urlEncode(Utility.urlDecode(blobName))), accessTier, leaseId); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, null); } /** * Adds a set tier operation to the batch. * * <p><strong>Code sample</strong></p> * * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier * * @param blobUrl URL of the blob. 
Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. * @param leaseId The lease ID the active lease on the blob must match. * @return a {@link Response} that will be used to associate this operation to the response when the batch is * submitted. * @throws UnsupportedOperationException If this batch has already added an operation of another type. */ public Response<Void> setBlobAccessTier(String blobUrl, AccessTier accessTier, String leaseId) { return setBlobAccessTierHelper(getUrlPath(blobUrl), accessTier, leaseId); } private Response<Void> setBlobAccessTierHelper(String urlPath, AccessTier accessTier, String leaseId) { setBatchType(BlobBatchType.SET_TIER); return createBatchOperation(blobAsyncClient.setAccessTierWithResponse(accessTier, null, leaseId), urlPath, EXPECTED_SET_TIER_STATUS_CODES); } private <T> Response<T> createBatchOperation(Mono<Response<T>> response, String urlPath, int... expectedStatusCodes) { BlobBatchOperationResponse<T> batchOperationResponse = new BlobBatchOperationResponse<>(expectedStatusCodes); batchOperationQueue.add(new BlobBatchOperation<>(batchOperationResponse, response, urlPath)); return batchOperationResponse; } private String getUrlPath(String url) { return UrlBuilder.parse(url).getPath(); } private void setBatchType(BlobBatchType batchType) { if (this.batchType == null) { this.batchType = batchType; } else if (this.batchType != batchType) { throw logger.logExceptionAsError(new UnsupportedOperationException(String.format(Locale.ROOT, "'BlobBatch' only supports homogeneous operations and is a %s batch.", this.batchType))); } } /* * This performs a cleanup operation that would be handled when the request is sent through Netty or OkHttp. * Additionally, it removes the "x-ms-version" header from the request as batch operation requests cannot have this * and it adds the header "Content-Id" that allows the request to be mapped to the response. 
*/ private Mono<HttpResponse> cleanseHeaders(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getHttpRequest().getHeaders().remove(X_MS_VERSION); Map<String, String> headers = context.getHttpRequest().getHeaders().toMap(); headers.entrySet().removeIf(header -> header.getValue() == null); context.getHttpRequest().setHeaders(new HttpHeaders(headers)); return next.process(); } /* * This performs changing the request URL to the value passed through the pipeline context. This policy is used in * place of constructing a new client for each batch request that is being sent. */ private Mono<HttpResponse> setRequestUrl(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { try { UrlBuilder requestUrl = UrlBuilder.parse(context.getHttpRequest().getUrl()); requestUrl.setPath(context.getData(BATCH_REQUEST_URL_PATH).get().toString()); context.getHttpRequest().setUrl(requestUrl.toUrl()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new IllegalStateException(ex))); } return next.process(); } /* * This will "send" the batch operation request when triggered, it simply acts as a way to build and write the * batch operation into the overall request and then returns nothing as the response. */ private Mono<HttpResponse> buildBatchOperation(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { BlobBatchOperationInfo operationInfo = (BlobBatchOperationInfo) context.getData(BATCH_OPERATION_INFO).get(); BlobBatchOperationResponse<?> batchOperationResponse = (BlobBatchOperationResponse<?>) context.getData(BATCH_OPERATION_RESPONSE).get(); operationInfo.addBatchOperation(batchOperationResponse, context.getHttpRequest()); return Mono.empty(); } }
Add a comment as to why this header is filtered.
void addBatchOperation(BlobBatchOperationResponse<?> batchOperation, HttpRequest request) { int contentId = this.contentId.getAndIncrement(); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!CoreUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" + urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchOperationResponseMap.put(contentId, batchOperation.setRequest(request)); batchOperations.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); }
.filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName()))
void addBatchOperation(BlobBatchOperationResponse<?> batchOperation, HttpRequest request) { int contentId = this.contentId.getAndIncrement(); StringBuilder batchRequestBuilder = new StringBuilder(); appendWithNewline(batchRequestBuilder, "--" + batchBoundary); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TYPE); appendWithNewline(batchRequestBuilder, BATCH_OPERATION_CONTENT_TRANSFER_ENCODING); appendWithNewline(batchRequestBuilder, String.format(BATCH_OPERATION_CONTENT_ID_TEMPLATE, contentId)); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); String method = request.getHttpMethod().toString(); String urlPath = request.getUrl().getPath(); String urlQuery = request.getUrl().getQuery(); if (!CoreUtils.isNullOrEmpty(urlQuery)) { urlPath = urlPath + "?" + urlQuery; } appendWithNewline(batchRequestBuilder, String.format(OPERATION_TEMPLATE, method, urlPath, HTTP_VERSION)); /* * The 'x-ms-version' header is removed from batch operations as all batch operations will use the * 'x-ms-version' used in the batch request. This header is illegal and will fail the batch request if present * in any operation. */ request.getHeaders().stream() .filter(header -> !X_MS_VERSION.equalsIgnoreCase(header.getName())) .forEach(header -> appendWithNewline(batchRequestBuilder, String.format(HEADER_TEMPLATE, header.getName(), header.getValue()))); batchRequestBuilder.append(BlobBatchHelper.HTTP_NEWLINE); batchOperationResponseMap.put(contentId, batchOperation.setRequest(request)); batchOperations.add(ByteBuffer.wrap(batchRequestBuilder.toString().getBytes(StandardCharsets.UTF_8))); }
class BlobBatchOperationInfo { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private final Collection<ByteBuffer> batchOperations; private final Map<Integer, BlobBatchOperationResponse<?>> batchOperationResponseMap; /** * Creates a {@link BlobBatchOperationInfo} which contains all information necessary for submitting a batch * operation. */ BlobBatchOperationInfo() { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); this.batchOperations = new ConcurrentLinkedQueue<>(); this.batchOperationResponseMap = new ConcurrentHashMap<>(); } /* * Gets the body for the batch operation. * * @return Request body. */ Collection<ByteBuffer> getBody() { return batchOperations; } /* * Gets the size of the batch operation request. * * @return Size of the request body. */ long getContentLength() { return batchOperations.stream().map(buffer -> (long) buffer.remaining()).reduce(0L, Long::sum); } /* * Gets the Content-Type header for the batch operation request. * * @return Content-Type header for the request. 
*/ String getContentType() { return contentType; } /* * Adds an operation to the operation set being submitted in the batch. * * @param batchOperation Operation to add to the batch. * @param request The {@link HttpRequest} for the operation. */ /* * Completes the batch by adding the final boundary identifier to the request body. */ void finalizeBatchOperations() { batchOperations.add(ByteBuffer.wrap(String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE) .getBytes(StandardCharsets.UTF_8))); } /* * Gets the batch operation with the passed Content-ID. * * @param contentId Content-ID of the operation. * @return The {@link BlobBatchOperationResponse} correlated to the passed Content-ID. */ BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchOperationResponseMap.get(contentId); } /* * Gets the number of operations contained in the batch. * * @return Number of operations in the batch. */ int getOperationCount() { return batchOperationResponseMap.size(); } private static void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
class BlobBatchOperationInfo { private static final String X_MS_VERSION = "x-ms-version"; private static final String BATCH_BOUNDARY_TEMPLATE = "batch_%s"; private static final String REQUEST_CONTENT_TYPE_TEMPLATE = "multipart/mixed; boundary=%s"; private static final String BATCH_OPERATION_CONTENT_TYPE = "Content-Type: application/http"; private static final String BATCH_OPERATION_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding: binary"; private static final String BATCH_OPERATION_CONTENT_ID_TEMPLATE = "Content-ID: %d"; private static final String HTTP_VERSION = "HTTP/1.1"; private static final String OPERATION_TEMPLATE = "%s %s %s"; private static final String HEADER_TEMPLATE = "%s: %s"; private final AtomicInteger contentId; private final String batchBoundary; private final String contentType; private final Collection<ByteBuffer> batchOperations; private final Map<Integer, BlobBatchOperationResponse<?>> batchOperationResponseMap; /** * Creates a {@link BlobBatchOperationInfo} which contains all information necessary for submitting a batch * operation. */ BlobBatchOperationInfo() { this.contentId = new AtomicInteger(); this.batchBoundary = String.format(BATCH_BOUNDARY_TEMPLATE, UUID.randomUUID()); this.contentType = String.format(REQUEST_CONTENT_TYPE_TEMPLATE, batchBoundary); this.batchOperations = new ConcurrentLinkedQueue<>(); this.batchOperationResponseMap = new ConcurrentHashMap<>(); } /* * Gets the body for the batch operation. * * @return Request body. */ Collection<ByteBuffer> getBody() { return batchOperations; } /* * Gets the size of the batch operation request. * * @return Size of the request body. */ long getContentLength() { return batchOperations.stream().map(buffer -> (long) buffer.remaining()).reduce(0L, Long::sum); } /* * Gets the Content-Type header for the batch operation request. * * @return Content-Type header for the request. 
*/ String getContentType() { return contentType; } /* * Adds an operation to the operation set being submitted in the batch. * * @param batchOperation Operation to add to the batch. * @param request The {@link HttpRequest} for the operation. */ /* * Completes the batch by adding the final boundary identifier to the request body. */ void finalizeBatchOperations() { batchOperations.add(ByteBuffer.wrap(String.format("--%s--%s", batchBoundary, BlobBatchHelper.HTTP_NEWLINE) .getBytes(StandardCharsets.UTF_8))); } /* * Gets the batch operation with the passed Content-ID. * * @param contentId Content-ID of the operation. * @return The {@link BlobBatchOperationResponse} correlated to the passed Content-ID. */ BlobBatchOperationResponse<?> getBatchRequest(int contentId) { return batchOperationResponseMap.get(contentId); } /* * Gets the number of operations contained in the batch. * * @return Number of operations in the batch. */ int getOperationCount() { return batchOperationResponseMap.size(); } private static void appendWithNewline(StringBuilder stringBuilder, String value) { stringBuilder.append(value).append(BlobBatchHelper.HTTP_NEWLINE); } }
Should we add a message to this RuntimeException before bubbling it up? Could be useful because often the first thing I do is "Ctrl+F" for the message in code. There is another instance.
void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.info("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext, EventPosition.earliest()); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber(), true); } else { startFromEventPosition = initializationContext.getInitialPosition(); } ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); eventHubConsumer.receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions) .subscribe(partitionEvent -> { EventData eventData = partitionEvent.getData(); Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { 
eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } try { partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, partitionEvent.getLastEnqueuedEventProperties())); endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new RuntimeException(throwable)); } }, /* EventHubConsumer receive() returned an error */ ex -> handleReceiveError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN))); }
throw logger.logExceptionAsError(new RuntimeException(throwable));
void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.info("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext, EventPosition.earliest()); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else { startFromEventPosition = initializationContext.getInitialPosition(); } ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); eventHubConsumer.receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions) .subscribe(partitionEvent -> { EventData eventData = partitionEvent.getData(); Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { 
eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } try { partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, partitionEvent.getLastEnqueuedEventProperties())); endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new RuntimeException("Error in event processing callback", throwable)); } }, /* EventHubConsumer receive() returned an error */ ex -> handleReceiveError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN))); }
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this * EventProcessorClient will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. */ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. 
*/ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. */ private void handleReceiveError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { try { logger.warning("Error receiving events for partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); } catch (Exception ex) { logger.warning(Messages.FAILED_PROCESSING_ERROR_RECEIVE, claimedOwnership.getPartitionId(), ex); } finally { try { eventHubConsumer.close(); } finally { partitionPumps.remove(claimedOwnership.getPartitionId()); } } } /* * Starts a new process tracing span and attaches the returned context to the EventData object for users. 
*/ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE); Context entityContext = spanContext.addData(ENTITY_PATH_KEY, eventHubName); return tracerProvider.startSpan(entityContext.addData(HOST_NAME_KEY, fullyQualifiedNamespace), ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException); } } else { logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanScope.get() != null ? spanScope.getClass() : "null")); } } }
/**
 * Manages the partition "pumps" (one {@link EventHubConsumerAsyncClient} per owned partition)
 * used by an {@link EventProcessorClient} to receive and process events.
 */
class PartitionPumpManager {
    private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class);
    private final CheckpointStore checkpointStore;
    // Maps partition id -> active consumer for that partition.
    private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>();
    private final Supplier<PartitionProcessor> partitionProcessorFactory;
    private final EventHubClientBuilder eventHubClientBuilder;
    private final TracerProvider tracerProvider;
    private final boolean trackLastEnqueuedEventProperties;

    /**
     * Creates an instance of partition pump manager.
     *
     * @param checkpointStore The partition manager that is used to store and update checkpoints.
     * @param partitionProcessorFactory The partition processor factory that is used to create new instances of
     * {@link PartitionProcessor} when new partition pumps are started.
     * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each
     * partition processed by this {@link EventProcessorClient}.
     * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this
     * EventProcessorClient will also include the last enqueued event properties for it's respective partitions.
     * @param tracerProvider The tracer implementation.
     */
    PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory,
        EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties,
        TracerProvider tracerProvider) {
        this.checkpointStore = checkpointStore;
        this.partitionProcessorFactory = partitionProcessorFactory;
        this.eventHubClientBuilder = eventHubClientBuilder;
        this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties;
        this.tracerProvider = tracerProvider;
    }

    /**
     * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link
     * EventProcessorClient} is requested to stop.
     */
    void stopAllPartitionPumps() {
        this.partitionPumps.forEach((partitionId, eventHubConsumer) -> {
            try {
                eventHubConsumer.close();
            } catch (Exception ex) {
                // Best-effort close: log and keep going so the remaining pumps are still closed.
                logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
            } finally {
                // Always drop the entry, even if close() failed, so the pump can be restarted later.
                partitionPumps.remove(partitionId);
            }
        });
    }

    /**
     * Handles an error surfaced by the receive operation of a claimed partition: notifies the user's
     * {@link PartitionProcessor} of the error, closes the processor with reason
     * {@code LOST_PARTITION_OWNERSHIP}, then closes the consumer and removes its pump entry.
     *
     * @param claimedOwnership Ownership record of the partition whose receive operation failed.
     * @param eventHubConsumer The consumer to close.
     * @param partitionProcessor The user-supplied processor to notify and close.
     * @param throwable The error returned by the receive operation.
     * @param partitionContext Context identifying the partition being processed.
     */
    private void handleReceiveError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer,
        PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) {
        try {
            logger.warning("Error receiving events for partition {}", partitionContext.getPartitionId(), throwable);
            partitionProcessor.processError(new ErrorContext(partitionContext, throwable));
            // A receive error means this processor can no longer pump the partition; treat the
            // ownership as lost so another event processor may claim it.
            CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP;
            partitionProcessor.close(new CloseContext(partitionContext, closeReason));
        } catch (Exception ex) {
            // User callbacks may themselves throw; log and continue with cleanup.
            logger.warning(Messages.FAILED_PROCESSING_ERROR_RECEIVE, claimedOwnership.getPartitionId(), ex);
        } finally {
            try {
                eventHubConsumer.close();
            } finally {
                partitionPumps.remove(claimedOwnership.getPartitionId());
            }
        }
    }

    /*
     * Starts a new process tracing span and attaches the returned context to the EventData object for users.
     */
    private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) {
        Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY);
        if (diagnosticId == null || !tracerProvider.isEnabled()) {
            // No diagnostic id on the event, or tracing disabled: nothing to link.
            return Context.NONE;
        }
        Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE);
        Context entityContext = spanContext.addData(ENTITY_PATH_KEY, eventHubName);
        return tracerProvider.startSpan(entityContext.addData(HOST_NAME_KEY, fullyQualifiedNamespace),
            ProcessKind.PROCESS);
    }

    /*
     * Ends the process tracing span and closes the scope associated with that span.
     */
    private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) {
        Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY);
        if (!spanScope.isPresent() || !tracerProvider.isEnabled()) {
            return;
        }
        // isPresent() was checked above, so get() is safe and never null.
        Object scope = spanScope.get();
        if (scope instanceof Closeable) {
            Closeable close = (Closeable) scope;
            try {
                close.close();
                tracerProvider.endSpan(processSpanContext, signal);
            } catch (IOException ioException) {
                logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException);
            }
        } else {
            // Fix: report the runtime class of the scope object itself. The previous code called
            // getClass() on the Optional wrapper, which always logged java.util.Optional and hid
            // the actual offending type.
            logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR,
                scope.getClass()));
        }
    }
}
Added a descriptive message to both exceptions so the origin of the failure is clear in logs.
/**
 * Starts a new partition pump for the newly claimed partition. If the partition already has an
 * active pump, no new consumer is created.
 *
 * @param claimedOwnership The ownership record for the partition to start pumping.
 * @param checkpoint The last known checkpoint for this partition, or {@code null} if none exists.
 */
void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) {
    if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
        logger.info("Consumer is already running for this partition {}", claimedOwnership.getPartitionId());
        return;
    }

    PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(),
        claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(),
        claimedOwnership.getPartitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get();

    InitializationContext initializationContext = new InitializationContext(partitionContext,
        EventPosition.earliest());
    partitionProcessor.initialize(initializationContext);

    // Prefer the checkpoint's offset over its sequence number; fall back to the processor's
    // initial position when no checkpoint exists.
    EventPosition startFromEventPosition = null;
    if (checkpoint != null && checkpoint.getOffset() != null) {
        startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset());
    } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) {
        // NOTE(review): inclusive=true re-delivers the checkpointed event itself; confirm this
        // is intended rather than resuming from the event after the checkpoint.
        startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber(), true);
    } else {
        startFromEventPosition = initializationContext.getInitialPosition();
    }

    ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L)
        .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties);

    EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient()
        .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
    // Register the pump before subscribing so duplicate start requests are rejected above.
    partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer);

    eventHubConsumer
        .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions)
        .subscribe(partitionEvent -> {
            EventData eventData = partitionEvent.getData();
            Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(),
                eventHubConsumer.getFullyQualifiedNamespace());
            if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) {
                eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext);
            }
            try {
                partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore,
                    partitionEvent.getLastEnqueuedEventProperties()));
                endProcessTracingSpan(processSpanContext, Signal.complete());
            } catch (Throwable throwable) {
                /* user code for event processing threw an exception - log and bubble up */
                endProcessTracingSpan(processSpanContext, Signal.error(throwable));
                // Fix: include a message so the wrapped exception identifies where the failure
                // originated instead of surfacing a bare RuntimeException.
                throw logger.logExceptionAsError(
                    new RuntimeException("Error in event processing callback", throwable));
            }
        },
            /* EventHubConsumer receive() returned an error */
            ex -> handleReceiveError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext),
            () -> partitionProcessor.close(new CloseContext(partitionContext,
                CloseReason.EVENT_PROCESSOR_SHUTDOWN)));
}
throw logger.logExceptionAsError(new RuntimeException(throwable));
// Starts a new partition pump for the newly claimed partition. If the partition already has an
// active pump, no new consumer is created.
// @param claimedOwnership The ownership record for the partition to start pumping.
// @param checkpoint The last known checkpoint for this partition, or null if none exists.
void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) {
    if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
        logger.info("Consumer is already running for this partition {}", claimedOwnership.getPartitionId());
        return;
    }

    PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(),
        claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(),
        claimedOwnership.getPartitionId());
    PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get();

    InitializationContext initializationContext = new InitializationContext(partitionContext,
        EventPosition.earliest());
    partitionProcessor.initialize(initializationContext);

    // Prefer the checkpoint's offset over its sequence number; fall back to the processor's
    // initial position when no checkpoint exists.
    EventPosition startFromEventPosition = null;
    if (checkpoint != null && checkpoint.getOffset() != null) {
        startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset());
    } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) {
        startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber());
    } else {
        startFromEventPosition = initializationContext.getInitialPosition();
    }

    ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L)
        .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties);

    EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient()
        .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
    // Register the pump before subscribing so duplicate start requests are rejected above.
    partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer);

    eventHubConsumer
        .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions)
        .subscribe(partitionEvent -> {
            EventData eventData = partitionEvent.getData();
            Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(),
                eventHubConsumer.getFullyQualifiedNamespace());
            if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) {
                eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext);
            }
            try {
                partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore,
                    partitionEvent.getLastEnqueuedEventProperties()));
                endProcessTracingSpan(processSpanContext, Signal.complete());
            } catch (Throwable throwable) {
                /* user code for event processing threw an exception - log and bubble up */
                endProcessTracingSpan(processSpanContext, Signal.error(throwable));
                throw logger.logExceptionAsError(new RuntimeException("Error in event processing callback",
                    throwable));
            }
        },
            /* EventHubConsumer receive() returned an error */
            ex -> handleReceiveError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext),
            () -> partitionProcessor.close(new CloseContext(partitionContext,
                CloseReason.EVENT_PROCESSOR_SHUTDOWN)));
}
// Manages the partition "pumps" (one EventHubConsumerAsyncClient per owned partition) used by an
// EventProcessorClient to receive and process events.
class PartitionPumpManager {
    private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class);
    private final CheckpointStore checkpointStore;
    // Maps partition id -> active consumer for that partition.
    private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>();
    private final Supplier<PartitionProcessor> partitionProcessorFactory;
    private final EventHubClientBuilder eventHubClientBuilder;
    private final TracerProvider tracerProvider;
    private final boolean trackLastEnqueuedEventProperties;

    /**
     * Creates an instance of partition pump manager.
     *
     * @param checkpointStore The partition manager that is used to store and update checkpoints.
     * @param partitionProcessorFactory The partition processor factory that is used to create new instances of
     * {@link PartitionProcessor} when new partition pumps are started.
     * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each
     * partition processed by this {@link EventProcessorClient}.
     * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this
     * EventProcessorClient will also include the last enqueued event properties for it's respective partitions.
     * @param tracerProvider The tracer implementation.
     */
    PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory,
        EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties,
        TracerProvider tracerProvider) {
        this.checkpointStore = checkpointStore;
        this.partitionProcessorFactory = partitionProcessorFactory;
        this.eventHubClientBuilder = eventHubClientBuilder;
        this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties;
        this.tracerProvider = tracerProvider;
    }

    /**
     * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link
     * EventProcessorClient} is requested to stop.
     */
    void stopAllPartitionPumps() {
        this.partitionPumps.forEach((partitionId, eventHubConsumer) -> {
            try {
                eventHubConsumer.close();
            } catch (Exception ex) {
                // Best-effort close: log and keep going so the remaining pumps are still closed.
                logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
            } finally {
                // Always drop the entry, even if close() failed, so the pump can be restarted later.
                partitionPumps.remove(partitionId);
            }
        });
    }

    /**
     * Handles an error surfaced by the receive operation of a claimed partition: notifies the user's
     * {@link PartitionProcessor} of the error, closes the processor with reason
     * {@code LOST_PARTITION_OWNERSHIP}, then closes the consumer and removes its pump entry.
     *
     * @param claimedOwnership Ownership record of the partition whose receive operation failed.
     * @param eventHubConsumer The consumer to close.
     * @param partitionProcessor The user-supplied processor to notify and close.
     * @param throwable The error returned by the receive operation.
     * @param partitionContext Context identifying the partition being processed.
     */
    private void handleReceiveError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer,
        PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) {
        try {
            logger.warning("Error receiving events for partition {}", partitionContext.getPartitionId(), throwable);
            partitionProcessor.processError(new ErrorContext(partitionContext, throwable));
            // A receive error means this processor can no longer pump the partition; treat the
            // ownership as lost so another event processor may claim it.
            CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP;
            partitionProcessor.close(new CloseContext(partitionContext, closeReason));
        } catch (Exception ex) {
            // User callbacks may themselves throw; log and continue with cleanup.
            logger.warning(Messages.FAILED_PROCESSING_ERROR_RECEIVE, claimedOwnership.getPartitionId(), ex);
        } finally {
            try {
                eventHubConsumer.close();
            } finally {
                partitionPumps.remove(claimedOwnership.getPartitionId());
            }
        }
    }

    /*
     * Starts a new process tracing span and attaches the returned context to the EventData object for users.
     */
    private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) {
        Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY);
        if (diagnosticId == null || !tracerProvider.isEnabled()) {
            // No diagnostic id on the event, or tracing disabled: nothing to link.
            return Context.NONE;
        }
        Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE);
        Context entityContext = spanContext.addData(ENTITY_PATH_KEY, eventHubName);
        return tracerProvider.startSpan(entityContext.addData(HOST_NAME_KEY, fullyQualifiedNamespace),
            ProcessKind.PROCESS);
    }

    /*
     * Ends the process tracing span and the scope of that span.
     */
    private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) {
        Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY);
        if (!spanScope.isPresent() || !tracerProvider.isEnabled()) {
            return;
        }
        if (spanScope.get() instanceof Closeable) {
            Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get();
            try {
                close.close();
                tracerProvider.endSpan(processSpanContext, signal);
            } catch (IOException ioException) {
                logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException);
            }
        } else {
            // NOTE(review): spanScope.getClass() is the Optional wrapper's class, so this always
            // logs java.util.Optional; likely meant spanScope.get().getClass(). The null check is
            // also redundant after the isPresent() guard above. TODO confirm and fix.
            logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR,
                spanScope.get() != null ? spanScope.getClass() : "null"));
        }
    }
}
// Manages the partition "pumps" (one EventHubConsumerAsyncClient per owned partition) used by an
// EventProcessorClient to receive and process events.
class PartitionPumpManager {
    private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class);
    private final CheckpointStore checkpointStore;
    // Maps partition id -> active consumer for that partition.
    private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>();
    private final Supplier<PartitionProcessor> partitionProcessorFactory;
    private final EventHubClientBuilder eventHubClientBuilder;
    private final TracerProvider tracerProvider;
    private final boolean trackLastEnqueuedEventProperties;

    /**
     * Creates an instance of partition pump manager.
     *
     * @param checkpointStore The partition manager that is used to store and update checkpoints.
     * @param partitionProcessorFactory The partition processor factory that is used to create new instances of
     * {@link PartitionProcessor} when new partition pumps are started.
     * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each
     * partition processed by this {@link EventProcessorClient}.
     * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this
     * EventProcessorClient will also include the last enqueued event properties for it's respective partitions.
     * @param tracerProvider The tracer implementation.
     */
    PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory,
        EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties,
        TracerProvider tracerProvider) {
        this.checkpointStore = checkpointStore;
        this.partitionProcessorFactory = partitionProcessorFactory;
        this.eventHubClientBuilder = eventHubClientBuilder;
        this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties;
        this.tracerProvider = tracerProvider;
    }

    /**
     * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link
     * EventProcessorClient} is requested to stop.
     */
    void stopAllPartitionPumps() {
        this.partitionPumps.forEach((partitionId, eventHubConsumer) -> {
            try {
                eventHubConsumer.close();
            } catch (Exception ex) {
                // Best-effort close: log and keep going so the remaining pumps are still closed.
                logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
            } finally {
                // Always drop the entry, even if close() failed, so the pump can be restarted later.
                partitionPumps.remove(partitionId);
            }
        });
    }

    /**
     * Handles an error surfaced by the receive operation of a claimed partition: notifies the user's
     * {@link PartitionProcessor} of the error, closes the processor with reason
     * {@code LOST_PARTITION_OWNERSHIP}, then closes the consumer and removes its pump entry.
     *
     * @param claimedOwnership Ownership record of the partition whose receive operation failed.
     * @param eventHubConsumer The consumer to close.
     * @param partitionProcessor The user-supplied processor to notify and close.
     * @param throwable The error returned by the receive operation.
     * @param partitionContext Context identifying the partition being processed.
     */
    private void handleReceiveError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer,
        PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) {
        try {
            logger.warning("Error receiving events for partition {}", partitionContext.getPartitionId(), throwable);
            partitionProcessor.processError(new ErrorContext(partitionContext, throwable));
            // A receive error means this processor can no longer pump the partition; treat the
            // ownership as lost so another event processor may claim it.
            CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP;
            partitionProcessor.close(new CloseContext(partitionContext, closeReason));
        } catch (Exception ex) {
            // User callbacks may themselves throw; log and continue with cleanup.
            logger.warning(Messages.FAILED_PROCESSING_ERROR_RECEIVE, claimedOwnership.getPartitionId(), ex);
        } finally {
            try {
                eventHubConsumer.close();
            } finally {
                partitionPumps.remove(claimedOwnership.getPartitionId());
            }
        }
    }

    /*
     * Starts a new process tracing span and attaches the returned context to the EventData object for users.
     */
    private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) {
        Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY);
        if (diagnosticId == null || !tracerProvider.isEnabled()) {
            // No diagnostic id on the event, or tracing disabled: nothing to link.
            return Context.NONE;
        }
        Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE);
        Context entityContext = spanContext.addData(ENTITY_PATH_KEY, eventHubName);
        return tracerProvider.startSpan(entityContext.addData(HOST_NAME_KEY, fullyQualifiedNamespace),
            ProcessKind.PROCESS);
    }

    /*
     * Ends the process tracing span and the scope of that span.
     */
    private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) {
        Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY);
        if (!spanScope.isPresent() || !tracerProvider.isEnabled()) {
            return;
        }
        if (spanScope.get() instanceof Closeable) {
            Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get();
            try {
                close.close();
                tracerProvider.endSpan(processSpanContext, signal);
            } catch (IOException ioException) {
                logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException);
            }
        } else {
            // NOTE(review): spanScope.getClass() is the Optional wrapper's class, so this always
            // logs java.util.Optional; likely meant spanScope.get().getClass(). The null check is
            // also redundant after the isPresent() guard above. TODO confirm and fix.
            logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR,
                spanScope.get() != null ? spanScope.getClass() : "null"));
        }
    }
}
Curious why we are collecting these into a list — the claim only happens once the flux completes. Is there a reason not to claim each ownership as it is emitted?
// Runs one round of partition load balancing. Each invocation claims at most one new partition
// for this event processor, so the system converges gradually towards an even distribution.
// @param tuple T1 = all ownership records keyed by partition id; T2 = all partition ids.
// @return a Mono that performs the balancing work when subscribed.
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();

        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            // NOTE(review): wrapping in Exceptions.propagate inside logExceptionAsError looks
            // redundant for an unchecked exception - TODO confirm.
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }

        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        if (!isValid(partitionOwnershipMap)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }

        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());

        /*
         * Create a map of owner id and a list of partitions it owns.
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));

        // Add this processor to the map with an empty list if it owns nothing yet, so the
        // per-owner math below counts it as an active processor.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());

        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all event processors are down for this Event Hub / consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, ownerPartitionMap,
                partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }

        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;

        /*
         * If the number of partitions in the Event Hub is not evenly divisible by the number of active event
         * processors, a few event processors may own 1 additional partition than the minimum when the load is
         * balanced. Calculate the number of event processors that can own an additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;

        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);

        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            // Load is already balanced: renew the claims on the partitions this processor is
            // actively pumping so their ownership records stay fresh.
            logger.info("Load is balanced");
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            // This processor is at (or above) its fair share: only renew existing claims.
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * its partitions are now available for others to own, or because event processors are just
         * starting up and gradually claiming partitions to own, or new partitions were added to the Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });

        claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
    });
}
.collect(Collectors.toList()))
// Runs one round of partition load balancing. Each invocation claims at most one new partition
// for this event processor, so the system converges gradually towards an even distribution.
// @param tuple T1 = all ownership records keyed by partition id; T2 = all partition ids.
// @return a Mono that performs the balancing work when subscribed.
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();

        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            // NOTE(review): wrapping in Exceptions.propagate inside logExceptionAsError looks
            // redundant for an unchecked exception - TODO confirm.
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }

        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        if (!isValid(partitionOwnershipMap)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }

        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());

        /*
         * Create a map of owner id and a list of partitions it owns.
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));

        // Add this processor to the map with an empty list if it owns nothing yet, so the
        // per-owner math below counts it as an active processor.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());

        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all event processors are down for this Event Hub / consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, ownerPartitionMap,
                partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }

        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;

        /*
         * If the number of partitions in the Event Hub is not evenly divisible by the number of active event
         * processors, a few event processors may own 1 additional partition than the minimum when the load is
         * balanced. Calculate the number of event processors that can own an additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;

        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);

        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            // Load is already balanced: renew the claims on the partitions this processor is
            // actively pumping so their ownership records stay fresh.
            logger.info("Load is balanced");
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            // This processor is at (or above) its fair share: only renew existing claims.
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * its partitions are now available for others to own, or because event processors are just
         * starting up and gradually claiming partitions to own, or new partitions were added to the Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });

        claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
    });
}
/**
 * Balances the load of processing Event Hub partitions across {@link EventProcessorClient} instances by
 * periodically reading and updating partition ownership records in the {@link CheckpointStore}. Each run of the
 * balancer claims at most one additional partition, so the system converges gradually to a balanced state.
 *
 * NOTE(review): the private {@code loadBalance(Tuple2)} overload referenced below via
 * {@code flatMap(this::loadBalance)} is defined elsewhere in this file and is not part of this span.
 */
class PartitionBasedLoadBalancer {

    // Shared source of randomness used to pick which partition to claim or steal, so concurrent
    // processors are less likely to all contend for the same partition.
    private static final Random RANDOM = new Random();

    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final CheckpointStore checkpointStore;
    private final EventHubAsyncClient eventHubAsyncClient;
    // Unique identifier of the EventProcessorClient that owns this load balancer.
    private final String ownerId;
    // Ownership records not modified within this many seconds are treated as abandoned by a dead processor.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;
    private final String fullyQualifiedNamespace;
    private final Consumer<ErrorContext> processError;
    // Context with partition id "NONE"; used when reporting errors not tied to a specific partition.
    private final PartitionContext partitionAgnosticContext;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param fullyQualifiedNamespace The fully qualified namespace of the Event Hub.
     * @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
     * @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessorClient} is processing.
     * @param processError The callback that will be called when an error occurs while running the load balancer.
     */
    PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
        final EventHubAsyncClient eventHubAsyncClient,
        final String fullyQualifiedNamespace,
        final String eventHubName,
        final String consumerGroupName,
        final String ownerId,
        final long inactiveTimeLimitInSeconds,
        final PartitionPumpManager partitionPumpManager,
        final Consumer<ErrorContext> processError) {
        this.checkpointStore = checkpointStore;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
        this.processError = processError;
        this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
            consumerGroupName, "NONE");
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
     * owning <b>at most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubConsumerAsyncClient} for processing events from that partition.
     */
    void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
            .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
            .timeout(Duration.ofMinutes(1))
            .collectMap(PartitionOwnership::getPartitionId, Function.identity());

        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofMinutes(1))
            .collectList();

        // Combine both results and run the balancing logic; any failure is logged and surfaced
        // through the processError callback with the partition-agnostic context.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .subscribe(ignored -> { },
                ex -> {
                    logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage());
                    ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
                    processError.accept(errorContext);
                }, () -> logger.info("Load balancing completed successfully"));
    }

    /*
     * Check if partition ownership data is valid before proceeding with load balancing. A record is invalid when
     * any required field is missing or when it belongs to a different Event Hub or consumer group.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.getEventHubName() == null
                    || !partitionOwnership.getEventHubName().equals(this.eventHubName)
                    || partitionOwnership.getConsumerGroup() == null
                    || !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName)
                    || partitionOwnership.getPartitionId() == null
                    || partitionOwnership.getLastModifiedTime() == null
                    || partitionOwnership.getETag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it. Callers only invoke this when ownerPartitionMap is non-empty, so the Optional from max() is
     * safe to unwrap here.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {

        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            // Any owner outside [min, min + 1] partitions means the load is not balanced.
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }

            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     * The Optional from min() is safe to unwrap because the caller guarantees this owner is present in the map.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                // Keep only records that were modified recently AND still have a non-empty owner id.
                return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Claims ownership of the given partition and additionally sends claim requests for every partition this
     * processor is already pumping. For each claim that succeeds, starts a partition pump with that partition's
     * checkpoint (if one exists in the checkpoint store).
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        Map<String, List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);

        // The new claim plus requests for all partitions currently being pumped by this processor.
        List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
        partitionsToClaim.add(ownershipRequest);
        partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
            .keySet()
            .stream()
            .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
            .collect(Collectors.toList()));

        checkpointStore
            .claimOwnership(partitionsToClaim)
            .timeout(Duration.ofMinutes(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.getPartitionId()))
            .doOnError(ex -> logger
                .warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(),
                    ex.getMessage(), ex))
            .collectList()
            // Pair the successfully claimed partitions with their checkpoints so each pump is started
            // with the checkpoint recorded for its partition (may be null if none exists).
            .zipWith(checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroupName)
                .collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
            .subscribe(ownedPartitionCheckpointsTuple -> {
                ownedPartitionCheckpointsTuple.getT1()
                    .stream()
                    .forEach(po -> partitionPumpManager.startPartitionPump(po,
                        ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
            },
                ex -> {
                    throw logger.logExceptionAsError(new RuntimeException("Error while listing checkpoints", ex));
                });
    }

    /*
     * Builds an ownership request for the given partition on behalf of this processor. The eTag of the previous
     * ownership record (if any) is carried over so the checkpoint store can perform a conditional update
     * (presumably optimistic concurrency -- confirm against the CheckpointStore implementation).
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
            .setOwnerId(this.ownerId)
            .setPartitionId(partitionIdToClaim)
            .setConsumerGroup(this.consumerGroupName)
            .setEventHubName(this.eventHubName)
            .setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag());
        return partitionOwnershipRequest;
    }
}
/**
 * Balances the load of processing Event Hub partitions across {@link EventProcessorClient} instances by
 * periodically reading and updating partition ownership records in the {@link CheckpointStore}. Each run of the
 * balancer claims at most one additional partition, so the system converges gradually to a balanced state.
 *
 * NOTE(review): the private {@code loadBalance(Tuple2)} overload referenced below via
 * {@code flatMap(this::loadBalance)} is defined elsewhere in this file and is not part of this span.
 */
class PartitionBasedLoadBalancer {

    // Shared source of randomness used to pick which partition to claim or steal, so concurrent
    // processors are less likely to all contend for the same partition.
    private static final Random RANDOM = new Random();

    private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class);
    private final String eventHubName;
    private final String consumerGroupName;
    private final CheckpointStore checkpointStore;
    private final EventHubAsyncClient eventHubAsyncClient;
    // Unique identifier of the EventProcessorClient that owns this load balancer.
    private final String ownerId;
    // Ownership records not modified within this many seconds are treated as abandoned by a dead processor.
    private final long inactiveTimeLimitInSeconds;
    private final PartitionPumpManager partitionPumpManager;
    private final String fullyQualifiedNamespace;
    private final Consumer<ErrorContext> processError;
    // Context with partition id "NONE"; used when reporting errors not tied to a specific partition.
    private final PartitionContext partitionAgnosticContext;

    /**
     * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group.
     *
     * @param checkpointStore The partition manager that this load balancer will use to read/update ownership details.
     * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events.
     * @param fullyQualifiedNamespace The fully qualified namespace of the Event Hub.
     * @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with.
     * @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with.
     * @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer.
     * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before
     * assuming the owner of the partition is inactive.
     * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions
     * that this {@link EventProcessorClient} is processing.
     * @param processError The callback that will be called when an error occurs while running the load balancer.
     */
    PartitionBasedLoadBalancer(final CheckpointStore checkpointStore,
        final EventHubAsyncClient eventHubAsyncClient,
        final String fullyQualifiedNamespace,
        final String eventHubName,
        final String consumerGroupName,
        final String ownerId,
        final long inactiveTimeLimitInSeconds,
        final PartitionPumpManager partitionPumpManager,
        final Consumer<ErrorContext> processError) {
        this.checkpointStore = checkpointStore;
        this.eventHubAsyncClient = eventHubAsyncClient;
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.ownerId = ownerId;
        this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds;
        this.partitionPumpManager = partitionPumpManager;
        this.processError = processError;
        this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName,
            consumerGroupName, "NONE");
    }

    /**
     * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link
     * EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient}
     * owning <b>at most one</b> new partition.
     * <p>
     * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active
     * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition,
     * this algorithm converges gradually towards a steady state.
     * </p>
     * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an
     * {@link EventHubConsumerAsyncClient} for processing events from that partition.
     */
    void loadBalance() {
        /*
         * Retrieve current partition ownership details from the datastore.
         */
        final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore
            .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName)
            .timeout(Duration.ofMinutes(1))
            .collectMap(PartitionOwnership::getPartitionId, Function.identity());

        /*
         * Retrieve the list of partition ids from the Event Hub.
         */
        final Mono<List<String>> partitionsMono = eventHubAsyncClient
            .getPartitionIds()
            .timeout(Duration.ofMinutes(1))
            .collectList();

        // Combine both results and run the balancing logic; any failure is logged and surfaced
        // through the processError callback with the partition-agnostic context.
        Mono.zip(partitionOwnershipMono, partitionsMono)
            .flatMap(this::loadBalance)
            .subscribe(ignored -> { },
                ex -> {
                    logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage());
                    ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex);
                    processError.accept(errorContext);
                }, () -> logger.info("Load balancing completed successfully"));
    }

    /*
     * Check if partition ownership data is valid before proceeding with load balancing. A record is invalid when
     * any required field is missing or when it belongs to a different Event Hub or consumer group.
     */
    private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap.values()
            .stream()
            .noneMatch(partitionOwnership -> {
                return partitionOwnership.getEventHubName() == null
                    || !partitionOwnership.getEventHubName().equals(this.eventHubName)
                    || partitionOwnership.getConsumerGroup() == null
                    || !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName)
                    || partitionOwnership.getPartitionId() == null
                    || partitionOwnership.getLastModifiedTime() == null
                    || partitionOwnership.getETag() == null;
            });
    }

    /*
     * Find the event processor that owns the maximum number of partitions and steal a random partition
     * from it. Callers only invoke this when ownerPartitionMap is non-empty, so the Optional from max() is
     * safe to unwrap here.
     */
    private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet()
            .stream()
            .max(Comparator.comparingInt(entry -> entry.getValue().size()))
            .get();
        int numberOfPartitions = ownerWithMaxPartitions.getValue().size();
        logger.info("Owner id {} owns {} partitions, stealing a partition from it",
            ownerWithMaxPartitions.getKey(), numberOfPartitions);
        return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId();
    }

    /*
     * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor}
     * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional
     * partition.
     */
    private boolean isLoadBalanced(final int minPartitionsPerEventProcessor,
        final int numberOfEventProcessorsWithAdditionalPartition,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {

        int count = 0;
        for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) {
            int numberOfPartitions = partitionOwnership.size();
            // Any owner outside [min, min + 1] partitions means the load is not balanced.
            if (numberOfPartitions < minPartitionsPerEventProcessor
                || numberOfPartitions > minPartitionsPerEventProcessor + 1) {
                return false;
            }

            if (numberOfPartitions == minPartitionsPerEventProcessor + 1) {
                count++;
            }
        }
        return count == numberOfEventProcessorsWithAdditionalPartition;
    }

    /*
     * This method is called after determining that the load is not balanced. This method will evaluate
     * if the current event processor should own more partitions. Specifically, this method returns true if the
     * current event processor owns less than the minimum number of partitions or if it owns the minimum number
     * and no other event processor owns lesser number of partitions than this event processor.
     * The Optional from min() is safe to unwrap because the caller guarantees this owner is present in the map.
     */
    private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor,
        final Map<String, List<PartitionOwnership>> ownerPartitionMap) {
        int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size();
        int leastPartitionsOwnedByAnyEventProcessor =
            ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size();
        return numberOfPartitionsOwned < minPartitionsPerEventProcessor
            || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor;
    }

    /*
     * This method will create a new map of partition id and PartitionOwnership containing only those partitions
     * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been
     * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by
     * dead event processors. These will not be included in the map returned by this method.
     */
    private Map<String, PartitionOwnership> removeInactivePartitionOwnerships(
        final Map<String, PartitionOwnership> partitionOwnershipMap) {
        return partitionOwnershipMap
            .entrySet()
            .stream()
            .filter(entry -> {
                // Keep only records that were modified recently AND still have a non-empty owner id.
                return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS
                    .toMillis(inactiveTimeLimitInSeconds)) && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId());
            }).collect(Collectors.toMap(Entry::getKey, Entry::getValue));
    }

    /*
     * Claims ownership of the given partition and additionally sends claim requests for every partition this
     * processor is already pumping. For each claim that succeeds, starts a partition pump with that partition's
     * checkpoint (if one exists in the checkpoint store).
     */
    private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap,
        Map<String, List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) {
        logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim);
        PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap,
            partitionIdToClaim);

        // The new claim plus requests for all partitions currently being pumped by this processor.
        List<PartitionOwnership> partitionsToClaim = new ArrayList<>();
        partitionsToClaim.add(ownershipRequest);
        partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps()
            .keySet()
            .stream()
            .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
            .collect(Collectors.toList()));

        checkpointStore
            .claimOwnership(partitionsToClaim)
            .timeout(Duration.ofMinutes(1))
            .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}",
                partitionOwnership.getPartitionId()))
            .doOnError(ex -> logger
                .warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(),
                    ex.getMessage(), ex))
            .collectList()
            // Pair the successfully claimed partitions with their checkpoints so each pump is started
            // with the checkpoint recorded for its partition (may be null if none exists).
            .zipWith(checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroupName)
                .collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity()))
            .subscribe(ownedPartitionCheckpointsTuple -> {
                ownedPartitionCheckpointsTuple.getT1()
                    .stream()
                    .forEach(po -> partitionPumpManager.startPartitionPump(po,
                        ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId())));
            },
                ex -> {
                    throw logger.logExceptionAsError(new RuntimeException("Error while listing checkpoints", ex));
                });
    }

    /*
     * Builds an ownership request for the given partition on behalf of this processor. The eTag of the previous
     * ownership record (if any) is carried over so the checkpoint store can perform a conditional update
     * (presumably optimistic concurrency -- confirm against the CheckpointStore implementation).
     */
    private PartitionOwnership createPartitionOwnershipRequest(
        final Map<String, PartitionOwnership> partitionOwnershipMap,
        final String partitionIdToClaim) {
        PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim);
        PartitionOwnership partitionOwnershipRequest = new PartitionOwnership()
            .setFullyQualifiedNamespace(this.fullyQualifiedNamespace)
            .setOwnerId(this.ownerId)
            .setPartitionId(partitionIdToClaim)
            .setConsumerGroup(this.consumerGroupName)
            .setEventHubName(this.eventHubName)
            .setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag());
        return partitionOwnershipRequest;
    }
}
The `checkpointStore.claimOwnership()` method takes a list of partition ownership instances.
/*
 * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
 * current Event Processor should take on the responsibility of processing more partitions, and claims at most one
 * new partition per invocation.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        // T1: partition id -> ownership record from the checkpoint store; T2: all partition ids in the Event Hub.
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();

        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }

        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        if (!isValid(partitionOwnershipMap)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }

        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());

        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));

        // Ensure the current processor appears in the map even if it owns no partitions yet; the helper
        // methods below rely on this entry being present.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());

        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, ownerPartitionMap,
                partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }

        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;

        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;

        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);

        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            logger.info("Load is balanced");
            // Load is balanced; re-claim the partitions this processor is already pumping
            // (presumably to keep their ownership records fresh -- confirm CheckpointStore semantics).
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            // Not this processor's turn to claim more; still re-claim what it already pumps (see note above).
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * its partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });

        claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
    });
}
.collect(Collectors.toList()))
/*
 * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the
 * current Event Processor should take on the responsibility of processing more partitions, and claims at most one
 * new partition per invocation.
 */
private Mono<Void> loadBalance(final Tuple2<Map<String, PartitionOwnership>, List<String>> tuple) {
    return Mono.fromRunnable(() -> {
        logger.info("Starting load balancer");
        // T1: partition id -> ownership record from the checkpoint store; T2: all partition ids in the Event Hub.
        Map<String, PartitionOwnership> partitionOwnershipMap = tuple.getT1();
        List<String> partitionIds = tuple.getT2();

        if (CoreUtils.isNullOrEmpty(partitionIds)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("There are no partitions in Event Hub " + eventHubName)));
        }

        int numberOfPartitions = partitionIds.size();
        logger.info("Partition manager returned {} ownership records", partitionOwnershipMap.size());
        logger.info("EventHubAsyncClient returned {} partitions", numberOfPartitions);
        if (!isValid(partitionOwnershipMap)) {
            throw logger.logExceptionAsError(Exceptions.propagate(
                new IllegalStateException("Invalid partitionOwnership data from CheckpointStore")));
        }

        /*
         * Remove all partitions' ownership that have not been modified for a configuration period of time. This
         * means that the previous EventProcessor that owned the partition is probably down and the partition is now
         * eligible to be claimed by other EventProcessors.
         */
        Map<String, PartitionOwnership> activePartitionOwnershipMap = removeInactivePartitionOwnerships(
            partitionOwnershipMap);
        logger.info("Number of active ownership records {}", activePartitionOwnershipMap.size());

        /*
         * Create a map of owner id and a list of partitions it owns
         */
        Map<String, List<PartitionOwnership>> ownerPartitionMap = activePartitionOwnershipMap.values()
            .stream()
            .collect(
                Collectors.groupingBy(PartitionOwnership::getOwnerId, mapping(Function.identity(), toList())));

        // Ensure the current processor appears in the map even if it owns no partitions yet; the helper
        // methods below rely on this entry being present.
        ownerPartitionMap.putIfAbsent(this.ownerId, new ArrayList<>());

        if (CoreUtils.isNullOrEmpty(activePartitionOwnershipMap)) {
            /*
             * If the active partition ownership map is empty, this is the first time an event processor is
             * running or all Event Processors are down for this Event Hub, consumer group combination. All
             * partitions in this Event Hub are available to claim. Choose a random partition to claim ownership.
             */
            claimOwnership(partitionOwnershipMap, ownerPartitionMap,
                partitionIds.get(RANDOM.nextInt(numberOfPartitions)));
            return;
        }

        /*
         * Find the minimum number of partitions every event processor should own when the load is
         * evenly distributed.
         */
        int numberOfActiveEventProcessors = ownerPartitionMap.size();
        logger.info("Number of active event processors {}", ownerPartitionMap.size());
        int minPartitionsPerEventProcessor = numberOfPartitions / numberOfActiveEventProcessors;

        /*
         * If the number of partitions in Event Hub is not evenly divisible by number of active event processors,
         * a few Event Processors may own 1 additional partition than the minimum when the load is balanced.
         * Calculate the number of event processors that can own additional partition.
         */
        int numberOfEventProcessorsWithAdditionalPartition = numberOfPartitions % numberOfActiveEventProcessors;

        logger.info("Expected min partitions per event processor = {}, expected number of event "
            + "processors with additional partition = {}", minPartitionsPerEventProcessor,
            numberOfEventProcessorsWithAdditionalPartition);

        if (isLoadBalanced(minPartitionsPerEventProcessor, numberOfEventProcessorsWithAdditionalPartition,
            ownerPartitionMap)) {
            logger.info("Load is balanced");
            // Load is balanced; re-claim the partitions this processor is already pumping
            // (presumably to keep their ownership records fresh -- confirm CheckpointStore semantics).
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        if (!shouldOwnMorePartitions(minPartitionsPerEventProcessor, ownerPartitionMap)) {
            logger.info("This event processor owns {} partitions and shouldn't own more",
                ownerPartitionMap.get(ownerId).size());
            // Not this processor's turn to claim more; still re-claim what it already pumps (see note above).
            checkpointStore.claimOwnership(partitionPumpManager.getPartitionPumps().keySet()
                .stream()
                .map(partitionId -> createPartitionOwnershipRequest(partitionOwnershipMap, partitionId))
                .collect(Collectors.toList()))
                .subscribe();
            return;
        }

        logger.info(
            "Load is unbalanced and this event processor should own more partitions");
        /*
         * If some partitions are unclaimed, this could be because an event processor is down and
         * its partitions are now available for others to own or because event processors are just
         * starting up and gradually claiming partitions to own or new partitions were added to Event Hub.
         * Find any partition that is not actively owned and claim it.
         *
         * OR
         *
         * Find a partition to steal from another event processor. Pick the event processor that owns the
         * highest number of partitions.
         */
        String partitionToClaim = partitionIds.parallelStream()
            .filter(partitionId -> !activePartitionOwnershipMap.containsKey(partitionId))
            .findAny()
            .orElseGet(() -> {
                logger.info("No unclaimed partitions, stealing from another event processor");
                return findPartitionToSteal(ownerPartitionMap);
            });

        claimOwnership(partitionOwnershipMap, ownerPartitionMap, partitionToClaim);
    });
}
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final CheckpointStore checkpointStore; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; private final String fullyQualifiedNamespace; private final Consumer<ErrorContext> processError; private final PartitionContext partitionAgnosticContext; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param checkpointStore The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with. * @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessorClient} is processing. * @param processError The callback that will be called when an error occurs while running the load balancer. 
*/ PartitionBasedLoadBalancer(final CheckpointStore checkpointStore, final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager, final Consumer<ErrorContext> processError) { this.checkpointStore = checkpointStore; this.eventHubAsyncClient = eventHubAsyncClient; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; this.processError = processError; this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName, consumerGroupName, "NONE"); } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient} * owning <b>at most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubConsumerAsyncClient} for processing events from that partition. */ void loadBalance() { /* * Retrieve current partition ownership details from the datastore. 
*/ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName) .timeout(Duration.ofMinutes(1)) .collectMap(PartitionOwnership::getPartitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. */ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofMinutes(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .subscribe(ignored -> { }, ex -> { logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage()); ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex); processError.accept(errorContext); }, () -> logger.info("Load balancing completed successfully")); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.getEventHubName() == null || !partitionOwnership.getEventHubName().equals(this.eventHubName) || partitionOwnership.getConsumerGroup() == null || !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName) || partitionOwnership.getPartitionId() == null || partitionOwnership.getLastModifiedTime() == null || partitionOwnership.getETag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, Map<String, List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); List<PartitionOwnership> partitionsToClaim = new ArrayList<>(); partitionsToClaim.add(ownershipRequest); partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps() .keySet() .stream() .map(partitionId -> 
createPartitionOwnershipRequest(partitionOwnershipMap, partitionId)) .collect(Collectors.toList())); checkpointStore .claimOwnership(partitionsToClaim) .timeout(Duration.ofMinutes(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.getPartitionId())) .doOnError(ex -> logger .warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(), ex.getMessage(), ex)) .collectList() .zipWith(checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroupName) .collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity())) .subscribe(ownedPartitionCheckpointsTuple -> { ownedPartitionCheckpointsTuple.getT1() .stream() .forEach(po -> partitionPumpManager.startPartitionPump(po, ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId()))); }, ex -> { throw logger.logExceptionAsError(new RuntimeException("Error while listing checkpoints", ex)); }); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .setFullyQualifiedNamespace(this.fullyQualifiedNamespace) .setOwnerId(this.ownerId) .setPartitionId(partitionIdToClaim) .setConsumerGroup(this.consumerGroupName) .setEventHubName(this.eventHubName) .setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag()); return partitionOwnershipRequest; } }
class PartitionBasedLoadBalancer { private static final Random RANDOM = new Random(); private final ClientLogger logger = new ClientLogger(PartitionBasedLoadBalancer.class); private final String eventHubName; private final String consumerGroupName; private final CheckpointStore checkpointStore; private final EventHubAsyncClient eventHubAsyncClient; private final String ownerId; private final long inactiveTimeLimitInSeconds; private final PartitionPumpManager partitionPumpManager; private final String fullyQualifiedNamespace; private final Consumer<ErrorContext> processError; private final PartitionContext partitionAgnosticContext; /** * Creates an instance of PartitionBasedLoadBalancer for the given Event Hub name and consumer group. * * @param checkpointStore The partition manager that this load balancer will use to read/update ownership details. * @param eventHubAsyncClient The asynchronous Event Hub client used to consume events. * @param eventHubName The Event Hub name the {@link EventProcessorClient} is associated with. * @param consumerGroupName The consumer group name the {@link EventProcessorClient} is associated with. * @param ownerId The identifier of the {@link EventProcessorClient} that owns this load balancer. * @param inactiveTimeLimitInSeconds The time in seconds to wait for an update on an ownership record before * assuming the owner of the partition is inactive. * @param partitionPumpManager The partition pump manager that keeps track of all EventHubConsumers and partitions * that this {@link EventProcessorClient} is processing. * @param processError The callback that will be called when an error occurs while running the load balancer. 
*/ PartitionBasedLoadBalancer(final CheckpointStore checkpointStore, final EventHubAsyncClient eventHubAsyncClient, final String fullyQualifiedNamespace, final String eventHubName, final String consumerGroupName, final String ownerId, final long inactiveTimeLimitInSeconds, final PartitionPumpManager partitionPumpManager, final Consumer<ErrorContext> processError) { this.checkpointStore = checkpointStore; this.eventHubAsyncClient = eventHubAsyncClient; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.consumerGroupName = consumerGroupName; this.ownerId = ownerId; this.inactiveTimeLimitInSeconds = inactiveTimeLimitInSeconds; this.partitionPumpManager = partitionPumpManager; this.processError = processError; this.partitionAgnosticContext = new PartitionContext(fullyQualifiedNamespace, eventHubName, consumerGroupName, "NONE"); } /** * This is the main method responsible for load balancing. This method is expected to be invoked by the {@link * EventProcessorClient} periodically. Every call to this method will result in this {@link EventProcessorClient} * owning <b>at most one</b> new partition. * <p> * The load is considered balanced when no active EventProcessor owns 2 partitions more than any other active * EventProcessor. Given that each invocation to this method results in ownership claim of at most one partition, * this algorithm converges gradually towards a steady state. * </p> * When a new partition is claimed, this method is also responsible for starting a partition pump that creates an * {@link EventHubConsumerAsyncClient} for processing events from that partition. */ void loadBalance() { /* * Retrieve current partition ownership details from the datastore. 
*/ final Mono<Map<String, PartitionOwnership>> partitionOwnershipMono = checkpointStore .listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroupName) .timeout(Duration.ofMinutes(1)) .collectMap(PartitionOwnership::getPartitionId, Function.identity()); /* * Retrieve the list of partition ids from the Event Hub. */ final Mono<List<String>> partitionsMono = eventHubAsyncClient .getPartitionIds() .timeout(Duration.ofMinutes(1)) .collectList(); Mono.zip(partitionOwnershipMono, partitionsMono) .flatMap(this::loadBalance) .subscribe(ignored -> { }, ex -> { logger.warning(Messages.LOAD_BALANCING_FAILED, ex.getMessage()); ErrorContext errorContext = new ErrorContext(partitionAgnosticContext, ex); processError.accept(errorContext); }, () -> logger.info("Load balancing completed successfully")); } /* * This method works with the given partition ownership details and Event Hub partitions to evaluate whether the * current Event Processor should take on the responsibility of processing more partitions. */ /* * Check if partition ownership data is valid before proceeding with load balancing. */ private boolean isValid(final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap.values() .stream() .noneMatch(partitionOwnership -> { return partitionOwnership.getEventHubName() == null || !partitionOwnership.getEventHubName().equals(this.eventHubName) || partitionOwnership.getConsumerGroup() == null || !partitionOwnership.getConsumerGroup().equals(this.consumerGroupName) || partitionOwnership.getPartitionId() == null || partitionOwnership.getLastModifiedTime() == null || partitionOwnership.getETag() == null; }); } /* * Find the event processor that owns the maximum number of partitions and steal a random partition * from it. 
*/ private String findPartitionToSteal(final Map<String, List<PartitionOwnership>> ownerPartitionMap) { Map.Entry<String, List<PartitionOwnership>> ownerWithMaxPartitions = ownerPartitionMap.entrySet() .stream() .max(Comparator.comparingInt(entry -> entry.getValue().size())) .get(); int numberOfPartitions = ownerWithMaxPartitions.getValue().size(); logger.info("Owner id {} owns {} partitions, stealing a partition from it", ownerWithMaxPartitions.getKey(), numberOfPartitions); return ownerWithMaxPartitions.getValue().get(RANDOM.nextInt(numberOfPartitions)).getPartitionId(); } /* * When the load is balanced, all active event processors own at least {@code minPartitionsPerEventProcessor} * and only {@code numberOfEventProcessorsWithAdditionalPartition} event processors will own 1 additional * partition. */ private boolean isLoadBalanced(final int minPartitionsPerEventProcessor, final int numberOfEventProcessorsWithAdditionalPartition, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int count = 0; for (List<PartitionOwnership> partitionOwnership : ownerPartitionMap.values()) { int numberOfPartitions = partitionOwnership.size(); if (numberOfPartitions < minPartitionsPerEventProcessor || numberOfPartitions > minPartitionsPerEventProcessor + 1) { return false; } if (numberOfPartitions == minPartitionsPerEventProcessor + 1) { count++; } } return count == numberOfEventProcessorsWithAdditionalPartition; } /* * This method is called after determining that the load is not balanced. This method will evaluate * if the current event processor should own more partitions. Specifically, this method returns true if the * current event processor owns less than the minimum number of partitions or if it owns the minimum number * and no other event processor owns lesser number of partitions than this event processor. 
*/ private boolean shouldOwnMorePartitions(final int minPartitionsPerEventProcessor, final Map<String, List<PartitionOwnership>> ownerPartitionMap) { int numberOfPartitionsOwned = ownerPartitionMap.get(this.ownerId).size(); int leastPartitionsOwnedByAnyEventProcessor = ownerPartitionMap.values().stream().min(Comparator.comparingInt(List::size)).get().size(); return numberOfPartitionsOwned < minPartitionsPerEventProcessor || numberOfPartitionsOwned == leastPartitionsOwnedByAnyEventProcessor; } /* * This method will create a new map of partition id and PartitionOwnership containing only those partitions * that are actively owned. All entries in the original map returned by CheckpointStore that haven't been * modified for a duration of time greater than the allowed inactivity time limit are assumed to be owned by * dead event processors. These will not be included in the map returned by this method. */ private Map<String, PartitionOwnership> removeInactivePartitionOwnerships( final Map<String, PartitionOwnership> partitionOwnershipMap) { return partitionOwnershipMap .entrySet() .stream() .filter(entry -> { return (System.currentTimeMillis() - entry.getValue().getLastModifiedTime() < TimeUnit.SECONDS .toMillis(inactiveTimeLimitInSeconds)) && !CoreUtils.isNullOrEmpty(entry.getValue().getOwnerId()); }).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); } private void claimOwnership(final Map<String, PartitionOwnership> partitionOwnershipMap, Map<String, List<PartitionOwnership>> ownerPartitionsMap, final String partitionIdToClaim) { logger.info("Attempting to claim ownership of partition {}", partitionIdToClaim); PartitionOwnership ownershipRequest = createPartitionOwnershipRequest(partitionOwnershipMap, partitionIdToClaim); List<PartitionOwnership> partitionsToClaim = new ArrayList<>(); partitionsToClaim.add(ownershipRequest); partitionsToClaim.addAll(partitionPumpManager.getPartitionPumps() .keySet() .stream() .map(partitionId -> 
createPartitionOwnershipRequest(partitionOwnershipMap, partitionId)) .collect(Collectors.toList())); checkpointStore .claimOwnership(partitionsToClaim) .timeout(Duration.ofMinutes(1)) .doOnNext(partitionOwnership -> logger.info("Successfully claimed ownership of partition {}", partitionOwnership.getPartitionId())) .doOnError(ex -> logger .warning(Messages.FAILED_TO_CLAIM_OWNERSHIP, ownershipRequest.getPartitionId(), ex.getMessage(), ex)) .collectList() .zipWith(checkpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroupName) .collectMap(checkpoint -> checkpoint.getPartitionId(), Function.identity())) .subscribe(ownedPartitionCheckpointsTuple -> { ownedPartitionCheckpointsTuple.getT1() .stream() .forEach(po -> partitionPumpManager.startPartitionPump(po, ownedPartitionCheckpointsTuple.getT2().get(po.getPartitionId()))); }, ex -> { throw logger.logExceptionAsError(new RuntimeException("Error while listing checkpoints", ex)); }); } private PartitionOwnership createPartitionOwnershipRequest( final Map<String, PartitionOwnership> partitionOwnershipMap, final String partitionIdToClaim) { PartitionOwnership previousPartitionOwnership = partitionOwnershipMap.get(partitionIdToClaim); PartitionOwnership partitionOwnershipRequest = new PartitionOwnership() .setFullyQualifiedNamespace(this.fullyQualifiedNamespace) .setOwnerId(this.ownerId) .setPartitionId(partitionIdToClaim) .setConsumerGroup(this.consumerGroupName) .setEventHubName(this.eventHubName) .setETag(previousPartitionOwnership == null ? null : previousPartitionOwnership.getETag()); return partitionOwnershipRequest; } }
Since you are refactoring anyway, swapping `statistics.getErroneousDocumentsCount()` and `statistics.getValidDocumentsCount()` will resolve issue #7072, which I found while writing samples.
static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) { return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getErroneousDocumentsCount(), statistics.getValidDocumentsCount(), statistics.getTransactionsCount()); }
return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getErroneousDocumentsCount(),
static TextDocumentBatchStatistics toBatchStatistics(RequestStatistics statistics) { return new TextDocumentBatchStatistics(statistics.getDocumentsCount(), statistics.getValidDocumentsCount(), statistics.getErroneousDocumentsCount(), statistics.getTransactionsCount()); }
class Transforms { /** * Given a list of inputs will apply the indexing function to it and return the updated list. * * @param textInputs the inputs to apply the mapping function to. * @param mappingFunction the function which applies the index to the incoming input value. * @param <T> the type of items being returned in the list. * @return The list holding all the generic items combined. */ static <T> List<T> mapByIndex(List<String> textInputs, BiFunction<String, String, T> mappingFunction) { return IntStream.range(0, textInputs.size()) .mapToObj(index -> mappingFunction.apply(String.valueOf(index), textInputs.get(index))) .collect(Collectors.toList()); } /** * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics} * * @param statistics the {@link DocumentStatistics} provided by the service. * @return the {@link TextDocumentStatistics} returned by the SDK. */ static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) { return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount()); } /** * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics} * * @param statistics the {@link RequestStatistics} provided by the service. * @return the {@link TextDocumentBatchStatistics} returned by the SDK. */ /** * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service. * @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK. */ static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError( TextAnalyticsError textAnalyticsError) { return new com.azure.ai.textanalytics.models.TextAnalyticsError( ErrorCodeValue.fromString(textAnalyticsError.getCode().toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget(), textAnalyticsError.getDetails() == null ? 
null : setErrors(textAnalyticsError.getDetails())); } /** * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}. * * @param textInputs the user provided input in {@link TextDocumentInput} * @return the service required input {@link MultiLanguageInput} */ static List<MultiLanguageInput> toMultiLanguageInput(List<TextDocumentInput> textInputs) { List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>(); for (TextDocumentInput textDocumentInput : textInputs) { multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage())); } return multiLanguageInputs; } /** * Helper method to set error details on {@link TextAnalyticsError}. * * @param details about specific errors that led to this reported error. * @return the {@link TextAnalyticsError} returned by the SDK. */ private static List<com.azure.ai.textanalytics.models.TextAnalyticsError> setErrors( List<TextAnalyticsError> details) { List<com.azure.ai.textanalytics.models.TextAnalyticsError> detailsList = new ArrayList<>(); for (TextAnalyticsError error : details) { detailsList.add(new com.azure.ai.textanalytics.models.TextAnalyticsError( ErrorCodeValue.fromString(error.getCode().toString()), error.getMessage(), error.getTarget(), error.getDetails() == null ? null : setErrors(error.getDetails()))); } return detailsList; } }
class Transforms { /** * Given a list of inputs will apply the indexing function to it and return the updated list. * * @param textInputs the inputs to apply the mapping function to. * @param mappingFunction the function which applies the index to the incoming input value. * @param <T> the type of items being returned in the list. * @return The list holding all the generic items combined. */ static <T> List<T> mapByIndex(List<String> textInputs, BiFunction<String, String, T> mappingFunction) { return IntStream.range(0, textInputs.size()) .mapToObj(index -> mappingFunction.apply(String.valueOf(index), textInputs.get(index))) .collect(Collectors.toList()); } /** * Convert {@link DocumentStatistics} to {@link TextDocumentStatistics} * * @param statistics the {@link DocumentStatistics} provided by the service. * @return the {@link TextDocumentStatistics} returned by the SDK. */ static TextDocumentStatistics toTextDocumentStatistics(DocumentStatistics statistics) { return new TextDocumentStatistics(statistics.getCharactersCount(), statistics.getTransactionsCount()); } /** * Convert {@link RequestStatistics} to {@link TextDocumentBatchStatistics} * * @param statistics the {@link RequestStatistics} provided by the service. * @return the {@link TextDocumentBatchStatistics} returned by the SDK. */ /** * Convert {@link TextAnalyticsError} to {@link com.azure.ai.textanalytics.models.TextAnalyticsError} * * @param textAnalyticsError the {@link TextAnalyticsError} returned by the service. * @return the {@link com.azure.ai.textanalytics.models.TextAnalyticsError} returned by the SDK. */ static com.azure.ai.textanalytics.models.TextAnalyticsError toTextAnalyticsError( TextAnalyticsError textAnalyticsError) { return new com.azure.ai.textanalytics.models.TextAnalyticsError( ErrorCodeValue.fromString(textAnalyticsError.getCode().toString()), textAnalyticsError.getMessage(), textAnalyticsError.getTarget(), textAnalyticsError.getDetails() == null ? 
null : setErrors(textAnalyticsError.getDetails())); } /** * Convert the incoming input {@link TextDocumentInput} to the service expected {@link MultiLanguageInput}. * * @param textInputs the user provided input in {@link TextDocumentInput} * @return the service required input {@link MultiLanguageInput} */ static List<MultiLanguageInput> toMultiLanguageInput(List<TextDocumentInput> textInputs) { List<MultiLanguageInput> multiLanguageInputs = new ArrayList<>(); for (TextDocumentInput textDocumentInput : textInputs) { multiLanguageInputs.add(new MultiLanguageInput().setId(textDocumentInput.getId()) .setText(textDocumentInput.getText()).setLanguage(textDocumentInput.getLanguage())); } return multiLanguageInputs; } /** * Helper method to set error details on {@link TextAnalyticsError}. * * @param details about specific errors that led to this reported error. * @return the {@link TextAnalyticsError} returned by the SDK. */ private static List<com.azure.ai.textanalytics.models.TextAnalyticsError> setErrors( List<TextAnalyticsError> details) { List<com.azure.ai.textanalytics.models.TextAnalyticsError> detailsList = new ArrayList<>(); for (TextAnalyticsError error : details) { detailsList.add(new com.azure.ai.textanalytics.models.TextAnalyticsError( ErrorCodeValue.fromString(error.getCode().toString()), error.getMessage(), error.getTarget(), error.getDetails() == null ? null : setErrors(error.getDetails()))); } return detailsList; } }
Surprisingly, it turns out the service actually will accept `x-ms-client-request-id` if `client-request-id` is not provided. This must be a case of the service having changed after this part of the Track 1 .NET SDK was written. We can remove this special case whenever is convenient (perhaps before GA).
public SearchIndexAsyncClient buildAsyncClient() { policies.add(new AddHeadersPolicy(headers)); policies.add(new RequestIdPolicy("client-request-id")); policies.add(new AddDatePolicy()); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); HttpPolicyProviders.addAfterRetryPolicies(policies); return new SearchIndexAsyncClient(endpoint, indexName, apiVersion, prepareForBuildClient()); }
policies.add(new RequestIdPolicy("client-request-id"));
public SearchIndexAsyncClient buildAsyncClient() { policies.add(new AddHeadersPolicy(headers)); policies.add(new RequestIdPolicy("client-request-id")); policies.add(new AddDatePolicy()); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); HttpPolicyProviders.addAfterRetryPolicies(policies); return new SearchIndexAsyncClient(endpoint, indexName, apiVersion, prepareForBuildClient()); }
class SearchIndexClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String SEARCH_PROPERTIES = "azure-search.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; SearchApiKeyCredential searchApiKeyCredential; SearchServiceVersion apiVersion; String endpoint; HttpClient httpClient; HttpLogOptions httpLogOptions; Configuration configuration; List<HttpPipelinePolicy> policies; private String clientName; private String clientVersion; private String indexName; private final HttpHeaders headers; private RetryPolicy retryPolicy; private final ClientLogger logger = new ClientLogger(SearchIndexClientBuilder.class); /** * Default Constructor */ public SearchIndexClientBuilder() { apiVersion = SearchServiceVersion.getLatest(); policies = new ArrayList<>(); httpClient = HttpClient.createDefault(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(SEARCH_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true"); } /** * Sets the api version to work against * * @param apiVersion api version * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder apiVersion(SearchServiceVersion apiVersion) { if (apiVersion == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Invalid apiVersion")); } this.apiVersion = apiVersion; return this; } /** * Returns the list of policies configured for this builder. 
* * @return List of HttpPipelinePolicy * */ List<HttpPipelinePolicy> getPolicies() { return this.policies; } /** * Sets the Azure Cognitive Search service endpoint * * @param endpoint the endpoint URL to the Azure Cognitive Search service * @return the updated SearchIndexClientBuilder object * @throws IllegalArgumentException on invalid service endpoint */ public SearchIndexClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the index name * * @param indexName name of the index * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder indexName(String indexName) { if (CoreUtils.isNullOrEmpty(indexName)) { throw logger.logExceptionAsError(new IllegalArgumentException("Invalid indexName")); } this.indexName = indexName; return this; } /** * Set the http client (optional). If this is not set, a default httpClient will be created * * @param httpClient value of httpClient * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets the api key to use for request authentication. 
* * @param searchApiKeyCredential api key for request authentication * @return the updated SearchIndexClientBuilder object * @throws IllegalArgumentException when the api key is empty */ public SearchIndexClientBuilder credential(SearchApiKeyCredential searchApiKeyCredential) { if (searchApiKeyCredential == null) { throw logger.logExceptionAsError(new NullPointerException("Empty apiKeyCredentials")); } if (CoreUtils.isNullOrEmpty(searchApiKeyCredential.getApiKey())) { throw logger.logExceptionAsError(new IllegalArgumentException("Empty apiKeyCredentials")); } this.searchApiKeyCredential = searchApiKeyCredential; return this; } /** * Sets the configuration store that is used during construction of the service client. * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Http Pipeline policy * * @param policy policy to add to the pipeline * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); this.policies.add(policy); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided {@link SearchIndexClientBuilder * to build {@link SearchServiceAsyncClient} or {@link SearchServiceClient}. * * @param retryPolicy RetryPolicy applied to each request. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * @return a {@link SearchIndexClient} created from the configurations in this builder. */ public SearchIndexClient buildClient() { return new SearchIndexClient(buildAsyncClient()); } /** * @return a {@link SearchIndexAsyncClient} created from the configurations in this builder. */ HttpPipeline prepareForBuildClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; if (searchApiKeyCredential != null) { this.policies.add(new SearchApiKeyPipelinePolicy(searchApiKeyCredential)); } policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } }
class SearchIndexClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String SEARCH_PROPERTIES = "azure-search.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; SearchApiKeyCredential searchApiKeyCredential; SearchServiceVersion apiVersion; String endpoint; HttpClient httpClient; HttpLogOptions httpLogOptions; Configuration configuration; List<HttpPipelinePolicy> policies; private String clientName; private String clientVersion; private String indexName; private final HttpHeaders headers; private RetryPolicy retryPolicy; private final ClientLogger logger = new ClientLogger(SearchIndexClientBuilder.class); /** * Default Constructor */ public SearchIndexClientBuilder() { apiVersion = SearchServiceVersion.getLatest(); policies = new ArrayList<>(); httpClient = HttpClient.createDefault(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(SEARCH_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true"); } /** * Sets the api version to work against * * @param apiVersion api version * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder apiVersion(SearchServiceVersion apiVersion) { if (apiVersion == null) { throw logger.logExceptionAsError(new IllegalArgumentException("Invalid apiVersion")); } this.apiVersion = apiVersion; return this; } /** * Returns the list of policies configured for this builder. 
* * @return List of HttpPipelinePolicy * */ List<HttpPipelinePolicy> getPolicies() { return this.policies; } /** * Sets the Azure Cognitive Search service endpoint * * @param endpoint the endpoint URL to the Azure Cognitive Search service * @return the updated SearchIndexClientBuilder object * @throws IllegalArgumentException on invalid service endpoint */ public SearchIndexClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the index name * * @param indexName name of the index * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder indexName(String indexName) { if (CoreUtils.isNullOrEmpty(indexName)) { throw logger.logExceptionAsError(new IllegalArgumentException("Invalid indexName")); } this.indexName = indexName; return this; } /** * Set the http client (optional). If this is not set, a default httpClient will be created * * @param httpClient value of httpClient * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets the api key to use for request authentication. 
* * @param searchApiKeyCredential api key for request authentication * @return the updated SearchIndexClientBuilder object * @throws IllegalArgumentException when the api key is empty */ public SearchIndexClientBuilder credential(SearchApiKeyCredential searchApiKeyCredential) { if (searchApiKeyCredential == null) { throw logger.logExceptionAsError(new NullPointerException("Empty apiKeyCredentials")); } if (CoreUtils.isNullOrEmpty(searchApiKeyCredential.getApiKey())) { throw logger.logExceptionAsError(new IllegalArgumentException("Empty apiKeyCredentials")); } this.searchApiKeyCredential = searchApiKeyCredential; return this; } /** * Sets the configuration store that is used during construction of the service client. * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Http Pipeline policy * * @param policy policy to add to the pipeline * @return the updated SearchIndexClientBuilder object */ public SearchIndexClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); this.policies.add(policy); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided {@link SearchIndexClientBuilder * to build {@link SearchServiceAsyncClient} or {@link SearchServiceClient}. * * @param retryPolicy RetryPolicy applied to each request. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated SearchIndexClientBuilder object. */ public SearchIndexClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * @return a {@link SearchIndexClient} created from the configurations in this builder. */ public SearchIndexClient buildClient() { return new SearchIndexClient(buildAsyncClient()); } /** * @return a {@link SearchIndexAsyncClient} created from the configurations in this builder. */ HttpPipeline prepareForBuildClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; if (searchApiKeyCredential != null) { this.policies.add(new SearchApiKeyPipelinePolicy(searchApiKeyCredential)); } policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } }
I liked the other one better. It shows an example of how it should look.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
.endpoint("<replace-with-your-text-analytics-endpoint-here>")
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); for (TextSentiment textSentiment : sentimentResult.getSentenceSentiments()) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze the sentiment of an input text. * * @param args Unused arguments to the program. */ }
what about this ``` .subscriptionKey("{subscription_key}") .endpoint("https://{servicename}.cognitiveservices.azure.com/") ```
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
.endpoint("<replace-with-your-text-analytics-endpoint-here>")
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); for (TextSentiment textSentiment : sentimentResult.getSentenceSentiments()) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze the sentiment of an input text. * * @param args Unused arguments to the program. */ }
Works for me
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = sentimentResult.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
.endpoint("<replace-with-your-text-analytics-endpoint-here>")
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); String text = "The hotel was dark and unclean."; final AnalyzeSentimentResult sentimentResult = client.analyzeSentiment(text); final TextSentiment documentSentiment = sentimentResult.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); for (TextSentiment textSentiment : sentimentResult.getSentenceSentiments()) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
class AnalyzeSentiment { /** * Main method to invoke this demo about how to analyze the sentiment of an input text. * * @param args Unused arguments to the program. */ }
You don't need to allocate a variable for this. `for (TextSentiment textSentiment : result.getSentenceSentiments()) {` would work since you only reference it once.
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildAsyncClient(); String text = "The hotel was dark and unclean."; client.analyzeSentiment(text).subscribe( result -> { final TextSentiment documentSentiment = result.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); final List<TextSentiment> sentiments = result.getSentenceSentiments(); for (TextSentiment textSentiment : sentiments) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }, error -> System.err.println("There was an error analyzing sentiment of the text." + error), () -> System.out.println("Sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
final List<TextSentiment> sentiments = result.getSentenceSentiments();
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildAsyncClient(); String text = "The hotel was dark and unclean."; client.analyzeSentiment(text).subscribe( result -> { final TextSentiment documentSentiment = result.getDocumentSentiment(); System.out.printf( "Recognized sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore()); for (TextSentiment textSentiment : result.getSentenceSentiments()) { System.out.printf( "Recognized sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s.%n", textSentiment.getTextSentimentClass(), textSentiment.getPositiveScore(), textSentiment.getNeutralScore(), textSentiment.getNegativeScore()); } }, error -> System.err.println("There was an error analyzing sentiment of the text." + error), () -> System.out.println("Sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
class AnalyzeSentimentAsync { /** * Main method to invoke this demo about how to analyze sentiment of a text input. * * @param args Unused arguments to the program. */ }
class AnalyzeSentimentAsync { /** * Main method to invoke this demo about how to analyze the sentiment of an input text. * * @param args Unused arguments to the program. */ }
That seems like an odd way to check for error. I thought you had a `hasError()` method?
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); if (documentSentiment == null) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
if (documentSentiment == null) {
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); if (analyzeSentimentResult.isError()) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze sentiment of a batch of text inputs. * * @param args Unused arguments to the program. */ }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze the sentiments of a batch input text. * * @param args Unused arguments to the program. */ }
Do you use the response headers? I think for samples, we should show use-cases we expect the general populace to use... analyzeBatchSentiment() would be my usual use-case.
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); if (documentSentiment == null) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe(
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); if (analyzeSentimentResult.isError()) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze sentiment of a batch of text inputs. * * @param args Unused arguments to the program. */ }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze the sentiments of a batch input text. * * @param args Unused arguments to the program. */ }
I'd reconsider the WithResponse calls. I thought we added this to encompass all use-cases, but I don't expect usual devs to care about the HTTP response.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "My cat might need to see a veterinarian.", "en"), new TextDocumentInput("2", "The pitot tube is used to measure airspeed.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", extractedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = extractedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (ExtractKeyPhraseResult extractKeyPhraseResult : extractedBatchResult) { System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); final List<String> documentKeyPhrases = extractKeyPhraseResult.getKeyPhrases(); if (documentKeyPhrases == null) { System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); continue; } for (String keyPhrases : documentKeyPhrases) { System.out.printf("Extracted phrases: %s.%n", keyPhrases); } } }
final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue();
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "My cat might need to see a veterinarian.", "en"), new TextDocumentInput("2", "The pitot tube is used to measure airspeed.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", extractedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = extractedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (ExtractKeyPhraseResult extractKeyPhraseResult : extractedBatchResult) { System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); if (extractKeyPhraseResult.isError()) { System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); continue; } for (String keyPhrases : extractKeyPhraseResult.getKeyPhrases()) { System.out.printf("Extracted phrases: %s.%n", keyPhrases); } } }
class ExtractKeyPhrasesBatchDocuments { /** * Main method to invoke this demo about how to extract key phrases of a batch of text inputs. * * @param args Unused arguments to the program. */ }
class ExtractKeyPhrasesBatchDocuments { /** * Main method to invoke this demo about how to extract the key phrases of a batch input text. * * @param args Unused arguments to the program. */ }
nit: can we call this variable `detectedPrimaryLanguage`
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected primary language: %s, ISO 6391 name: %s, score: %s.%n", detectedDocumentLanguage.getName(), detectedDocumentLanguage.getIso6391Name(), detectedDocumentLanguage.getScore()); final List<DetectedLanguage> detectedLanguages = detectLanguageResult.getDetectedLanguages(); for (DetectedLanguage detectedLanguage : detectedLanguages) { System.out.printf("Other detected languages: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }
final DetectedLanguage detectedDocumentLanguage = detectLanguageResult.getPrimaryLanguage();
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); String text = "hello world"; final DetectLanguageResult detectLanguageResult = client.detectLanguage(text, "US"); final DetectedLanguage detectedPrimaryLanguage = detectLanguageResult.getPrimaryLanguage(); System.out.printf("Detected primary language: %s, ISO 6391 name: %s, score: %s.%n", detectedPrimaryLanguage.getName(), detectedPrimaryLanguage.getIso6391Name(), detectedPrimaryLanguage.getScore()); for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Another detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }
class HelloWorld { /** * Main method to invoke this demo about how to detect language of a text input. * * @param args Unused arguments to the program. */ }
class HelloWorld { /** * Main method to invoke this demo about how to detect the language of an input text. * * @param args Unused arguments to the program. */ }
Currently, we don't have `analyzeBatchSentiment(inputs, requestOptions)` API. So we will add one if needed
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); if (documentSentiment == null) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe(
public static void main(String[] args) { TextAnalyticsAsyncClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildAsyncClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "The hotel was dark and unclean. The restaurant had amazing gnocchi.", "en"), new TextDocumentInput("2", "The restaurant had amazing gnocchi. The hotel was dark and unclean.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); client.analyzeBatchSentimentWithResponse(inputs, requestOptions).subscribe( result -> { DocumentResultCollection<AnalyzeSentimentResult> analyzedBatchResult = result.getValue(); System.out.printf("Model version: %s%n", analyzedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = analyzedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (AnalyzeSentimentResult analyzeSentimentResult : analyzedBatchResult) { System.out.printf("Document ID: %s%n", analyzeSentimentResult.getId()); if (analyzeSentimentResult.isError()) { System.out.printf("Cannot analyze sentiment. 
Error: %s%n", analyzeSentimentResult.getError().getMessage()); continue; } final TextSentiment documentSentiment = analyzeSentimentResult.getDocumentSentiment(); System.out.printf("Analyzed document sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", documentSentiment.getTextSentimentClass(), documentSentiment.getPositiveScore(), documentSentiment.getNeutralScore(), documentSentiment.getNegativeScore(), documentSentiment.getLength(), documentSentiment.getOffset()); for (TextSentiment sentenceSentiment : analyzeSentimentResult.getSentenceSentiments()) { System.out.printf("Analyzed sentence sentiment: %s, positive score: %s, neutral score: %s, negative score: %s, length of sentence: %s, offset of sentence: %s.%n", sentenceSentiment.getTextSentimentClass(), sentenceSentiment.getPositiveScore(), sentenceSentiment.getNeutralScore(), sentenceSentiment.getNegativeScore(), sentenceSentiment.getLength(), sentenceSentiment.getOffset()); } } }, error -> System.err.println("There was an error analyzing sentiment of the text inputs." + error), () -> System.out.println("Batch of sentiment analyzed.")); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException ignored) { } }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze sentiment of a batch of text inputs. * * @param args Unused arguments to the program. */ }
class AnalyzeSentimentBatchDocumentsAsync { /** * Main method to invoke this demo about how to analyze the sentiments of a batch input text. * * @param args Unused arguments to the program. */ }
same to the async case that I mentioned above. Either create another API like `extractBatchKeyPhrases(inputs, requestOptions)` or use `extractBatchKeyPhrases(inputs)` as example, I think we should show the user how to use `requestOptions` so I don't prefer the second way.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("<replace-with-your-text-analytics-key-here>") .endpoint("<replace-with-your-text-analytics-endpoint-here>") .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "My cat might need to see a veterinarian.", "en"), new TextDocumentInput("2", "The pitot tube is used to measure airspeed.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", extractedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = extractedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (ExtractKeyPhraseResult extractKeyPhraseResult : extractedBatchResult) { System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); final List<String> documentKeyPhrases = extractKeyPhraseResult.getKeyPhrases(); if (documentKeyPhrases == null) { System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); continue; } for (String keyPhrases : documentKeyPhrases) { System.out.printf("Extracted phrases: %s.%n", keyPhrases); } } }
final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue();
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .subscriptionKey("{subscription_key}") .endpoint("https: .buildClient(); List<TextDocumentInput> inputs = Arrays.asList( new TextDocumentInput("1", "My cat might need to see a veterinarian.", "en"), new TextDocumentInput("2", "The pitot tube is used to measure airspeed.", "en") ); final TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions().setShowStatistics(true); final DocumentResultCollection<ExtractKeyPhraseResult> extractedBatchResult = client.extractBatchKeyPhrasesWithResponse(inputs, requestOptions, Context.NONE).getValue(); System.out.printf("Model version: %s%n", extractedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = extractedBatchResult.getStatistics(); System.out.printf("A batch of document statistics, document count: %s, erroneous document count: %s, transaction count: %s, valid document count: %s.%n", batchStatistics.getDocumentCount(), batchStatistics.getErroneousDocumentCount(), batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (ExtractKeyPhraseResult extractKeyPhraseResult : extractedBatchResult) { System.out.printf("Document ID: %s%n", extractKeyPhraseResult.getId()); if (extractKeyPhraseResult.isError()) { System.out.printf("Cannot extract key phrases. Error: %s%n", extractKeyPhraseResult.getError().getMessage()); continue; } for (String keyPhrases : extractKeyPhraseResult.getKeyPhrases()) { System.out.printf("Extracted phrases: %s.%n", keyPhrases); } } }
class ExtractKeyPhrasesBatchDocuments { /** * Main method to invoke this demo about how to extract key phrases of a batch of text inputs. * * @param args Unused arguments to the program. */ }
class ExtractKeyPhrasesBatchDocuments { /** * Main method to invoke this demo about how to extract the key phrases of a batch input text. * * @param args Unused arguments to the program. */ }
it would be easier to reason about if you chained `.addData(NAMESPACE...)` to the declaration above rather than here.
private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName).addData(HOST_NAME_KEY, fullyQualifiedNamespace); return tracerProvider.startSpan(spanContext.addData(AZ_NAMESPACE_KEY, AZ_NAMESPACE_VALUE), ProcessKind.PROCESS); }
return tracerProvider.startSpan(spanContext.addData(AZ_NAMESPACE_KEY, AZ_NAMESPACE_VALUE),
private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); return tracerProvider.startSpan(spanContext, ProcessKind.PROCESS); }
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. */ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. 
*/ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. */ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.info("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext, EventPosition.earliest()); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else { startFromEventPosition = initializationContext.getInitialPosition(); } ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() 
.createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); eventHubConsumer.receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions) .subscribe(partitionEvent -> { EventData eventData = partitionEvent.getData(); Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } try { partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, partitionEvent.getLastEnqueuedEventProperties())); endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new RuntimeException("Error in event processing callback", throwable)); } }, /* EventHubConsumer receive() returned an error */ ex -> handleReceiveError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN))); } private void handleReceiveError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { try { logger.warning("Error receiving events for partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); } catch (Exception ex) { 
logger.warning(Messages.FAILED_PROCESSING_ERROR_RECEIVE, claimedOwnership.getPartitionId(), ex); } finally { try { eventHubConsumer.close(); } finally { partitionPumps.remove(claimedOwnership.getPartitionId()); } } } /* * Starts a new process tracing span and attaches the returned context to the EventData object for users. */ /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException); } } else { logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanScope.get() != null ? spanScope.getClass() : "null")); } } }
/**
 * Manages the partition "pumps" for an {@link EventProcessorClient}: one
 * {@link EventHubConsumerAsyncClient} per claimed partition, keyed by partition id.
 */
class PartitionPumpManager {
    private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class);
    private final CheckpointStore checkpointStore;
    // One active consumer per partition id; ConcurrentHashMap because pumps are
    // started/stopped from different threads (load balancer vs. receive callbacks).
    private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>();
    private final Supplier<PartitionProcessor> partitionProcessorFactory;
    private final EventHubClientBuilder eventHubClientBuilder;
    private final TracerProvider tracerProvider;
    private final boolean trackLastEnqueuedEventProperties;
    private final Map<String, EventPosition> initialPartitionEventPosition;

    /**
     * Creates an instance of partition pump manager.
     *
     * @param checkpointStore The partition manager that is used to store and update checkpoints.
     * @param partitionProcessorFactory The partition processor factory that is used to create new instances of
     * {@link PartitionProcessor} when new partition pumps are started.
     * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each
     * partition processed by this {@link EventProcessorClient}.
     * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this
     * EventProcessorClient will also include the last enqueued event properties for it's respective partitions.
     * @param tracerProvider The tracer implementation.
     * @param initialPartitionEventPosition Map of initial event positions for partition ids.
     */
    PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory,
        EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties,
        TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition) {
        this.checkpointStore = checkpointStore;
        this.partitionProcessorFactory = partitionProcessorFactory;
        this.eventHubClientBuilder = eventHubClientBuilder;
        this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties;
        this.tracerProvider = tracerProvider;
        this.initialPartitionEventPosition = initialPartitionEventPosition;
    }

    /**
     * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link
     * EventProcessorClient} is requested to stop.
     */
    void stopAllPartitionPumps() {
        // Removal during forEach is safe on ConcurrentHashMap.
        this.partitionPumps.forEach((partitionId, eventHubConsumer) -> {
            try {
                eventHubConsumer.close();
            } catch (Exception ex) {
                // Best effort: a consumer that fails to close must not prevent the others from closing.
                logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
            } finally {
                partitionPumps.remove(partitionId);
            }
        });
    }

    /**
     * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition
     * pump, this will not create a new consumer.
     *
     * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start.
     * @param checkpoint The last known checkpoint for this partition; may be {@code null}. When present, its offset
     * takes precedence over its sequence number for computing the starting position.
     */
    void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) {
        if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
            logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId());
            return;
        }
        try {
            PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(),
                claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(),
                claimedOwnership.getPartitionId());
            PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get();

            InitializationContext initializationContext = new InitializationContext(partitionContext);
            partitionProcessor.initialize(initializationContext);

            // Starting position precedence: checkpoint offset > checkpoint sequence number
            // > user-configured initial position > latest.
            EventPosition startFromEventPosition = null;
            if (checkpoint != null && checkpoint.getOffset() != null) {
                startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset());
            } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) {
                startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber());
            } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) {
                startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId());
            } else {
                startFromEventPosition = EventPosition.latest();
            }
            logger.info("Starting event processing from {} for partition {}", startFromEventPosition,
                claimedOwnership.getPartitionId());

            ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L)
                .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties);

            EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient()
                .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);

            // Register the pump before subscribing so the error/cleanup paths can find and remove it.
            partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer);
            eventHubConsumer
                .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions)
                .subscribe(
                    partitionEvent -> processEvent(partitionContext, partitionProcessor, eventHubConsumer,
                        partitionEvent),
                    /* EventHubConsumer receive() returned an error */
                    ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext),
                    () -> {
                        partitionProcessor.close(new CloseContext(partitionContext,
                            CloseReason.EVENT_PROCESSOR_SHUTDOWN));
                        cleanup(claimedOwnership, eventHubConsumer);
                    });
        } catch (Exception ex) {
            // If the consumer was already registered, tear it down before surfacing the failure.
            if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
                cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId()));
            }
            throw logger.logExceptionAsError(
                new PartitionProcessorException(
                    "Error occurred while starting partition pump for partition "
                        + claimedOwnership.getPartitionId(), ex));
        }
    }

    /**
     * Invokes the user's {@link PartitionProcessor#processEvent} callback for a single received event, wrapping the
     * call in a process tracing span. A throwable from user code is rethrown as
     * {@link PartitionProcessorException} so the subscription's error handler can tear the pump down.
     */
    private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor,
        EventHubConsumerAsyncClient eventHubConsumer, PartitionEvent partitionEvent) {
        EventData eventData = partitionEvent.getData();
        Context processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(),
            eventHubConsumer.getFullyQualifiedNamespace());
        if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) {
            eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext);
        }
        try {
            partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore,
                partitionEvent.getLastEnqueuedEventProperties()));
            endProcessTracingSpan(processSpanContext, Signal.complete());
        } catch (Throwable throwable) {
            /* user code for event processing threw an exception - log and bubble up */
            endProcessTracingSpan(processSpanContext, Signal.error(throwable));
            throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback",
                throwable));
        }
    }

    // Exposed for tests / internal inspection of active pumps.
    Map<String, EventHubConsumerAsyncClient> getPartitionPumps() {
        return this.partitionPumps;
    }

    /**
     * Handles an error signalled on the receive subscription. Transport errors are forwarded to the user's
     * {@code processError} callback; a {@link PartitionProcessorException} (user code already failed and was logged
     * in {@link #processEvent}) is rethrown after cleanup instead.
     */
    private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer,
        PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) {
        boolean shouldRethrow = true;
        if (!(throwable instanceof PartitionProcessorException)) {
            shouldRethrow = false;
            logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable);
            partitionProcessor.processError(new ErrorContext(partitionContext, throwable));
        }

        CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP;
        partitionProcessor.close(new CloseContext(partitionContext, closeReason));
        cleanup(claimedOwnership, eventHubConsumer);
        if (shouldRethrow) {
            PartitionProcessorException exception = (PartitionProcessorException) throwable;
            throw logger.logExceptionAsError(exception);
        }
    }

    /**
     * Closes the consumer and removes its entry from {@link #partitionPumps}; removal happens even when close throws.
     */
    private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) {
        try {
            logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId());
            eventHubConsumer.close();
        } finally {
            logger.info("Removing partition id {} from list of processing partitions",
                claimedOwnership.getPartitionId());
            partitionPumps.remove(claimedOwnership.getPartitionId());
        }
    }

    /*
     * Starts a new process tracing span and attaches the returned context to the EventData object for users.
     */
    // NOTE(review): startProcessTracingSpan's definition is not visible in this chunk — presumably elided by
    // extraction; processEvent above calls it.

    /*
     * Ends the process tracing span and the scope of that span.
     */
    private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) {
        Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY);
        if (!spanScope.isPresent() || !tracerProvider.isEnabled()) {
            return;
        }

        if (spanScope.get() instanceof Closeable) {
            Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get();
            try {
                close.close();
                tracerProvider.endSpan(processSpanContext, signal);
            } catch (IOException ioException) {
                logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException);
            }
        } else {
            // FIX: previously logged `spanScope.getClass()`, i.e. java.util.Optional, instead of the actual
            // scope object's type. `spanScope.get()` is safe here: isPresent() was verified above and an
            // Optional can never hold null, so the old `!= null` guard was dead code.
            logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR,
                spanScope.get().getClass()));
        }
    }
}
nit: It is a lot easier to understand all the method invocations by formatting it as: ```java finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath()) .addData(HOST_NAME_KEY, link.getHostname()) .addData(AZ_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); ``` Same comment as the ones below.
/**
 * Sends the batch to the associated Event Hub.
 * <p>
 * When tracing is enabled, a shared span builder is created from the first event's context and every event's
 * span context is linked to it; the SEND span is started inside {@code flatMap} (once the send link is
 * available) and ended on every terminal/next signal via {@code doOnEach}.
 *
 * @param batch The batch to send to the service. An empty batch is a no-op (logged as a warning).
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // parentContext is an AtomicReference because it is mutated inside the lambdas below.
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;

    Context sharedContext = null;
    List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The shared span builder is seeded from the first event's context only.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        final Message message = messageSerializer.serialize(event);

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Stamp the partition key onto each AMQP message's annotations, creating the map if absent.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        messages.add(message);
    }

    // Effectively-final copy for capture in the lambda below. When isTracingEnabled is true it is non-null
    // here because the (non-empty) batch's first iteration assigned it.
    Context finalSharedContext = sharedContext;

    return getSendLink(batch.getPartitionId())
        .flatMap(link -> {
            if (isTracingEnabled) {
                Context entityContext = finalSharedContext.addData(ENTITY_PATH_KEY, link.getEntityPath())
                    .addData(HOST_NAME_KEY, link.getHostname())
                    .addData(AZ_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
                parentContext.set(tracerProvider.startSpan(entityContext, ProcessKind.SEND));
            }
            return messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages);
        }).doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
? new MessageAnnotations(new HashMap<>())
/**
 * Sends the batch to the associated Event Hub.
 * <p>
 * Tracing: a shared span builder is seeded from the first event's context and every event's span context is
 * linked to it; the SEND span is started before subscribing to the send pipeline and ended on each signal via
 * {@code doOnEach}. The send itself is wrapped in {@code withRetry} using the configured try timeout and retry
 * policy, and results are published on the client's scheduler.
 *
 * @param batch The batch to send to the service. An empty batch is a no-op (logged as a warning).
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // AtomicReference because the context is mutated here and read inside the doOnEach lambda.
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;

    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // Shared span builder is seeded from the first event's context only.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        final Message message = messageSerializer.serialize(event);

        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            // Stamp the partition key onto each AMQP message's annotations, creating the map if absent.
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        messages.add(message);
    }

    if (isTracingEnabled) {
        // Defensive null check (unlike the older variant, the SEND span is started eagerly here, before the
        // link is obtained), then the client's entity path / host / namespace are attached to the span context.
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);

        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }

    return withRetry(getSendLink(batch.getPartitionId())
        .flatMap(link -> messages.size() == 1
            ? link.send(messages.get(0))
            : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy)
        .publishOn(scheduler)
        .doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        });
}
/**
 * An asynchronous producer responsible for sending {@link EventData} to an Event Hub, either to a specific
 * partition, with a partition key, or round-robin distributed by the service.
 */
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";

    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    /**
     * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the
     * service load balance messages is the eventHubName.
     */
    private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>();
    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnection connection;
    private final AmqpRetryOptions retryOptions;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // When the connection is shared with other clients, close() must not dispose of it.
    private final boolean isSharedConnection;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when a partition id is set in {@link CreateBatchOptions}/{@link SendOptions}; otherwise, the
     * service will load balance the messages amongst available partitions.
     * Note: constructor arguments are assigned without null validation.
     */
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        boolean isSharedConnection) {
        this.fullyQualifiedNamespace = fullyQualifiedNamespace;
        this.eventHubName = eventHubName;
        this.connection = connection;
        this.retryOptions = retryOptions;
        this.tracerProvider = tracerProvider;
        this.messageSerializer = messageSerializer;
        this.isSharedConnection = isSharedConnection;
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar
     * to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     *
     * @return The set of information for the requested partition under the Event Hub this client is associated
     * with.
     *
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // Partition key and partition id are mutually exclusive routing mechanisms.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // A non-positive reported link size falls back to the transport maximum.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }

                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;

                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation.
     * </p>
     *
     * @param event Event to send to the service.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }

        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and limits
     * documentation.
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     *
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }

        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
     * message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }

        return sendInternal(events, options);
    }

    // NOTE(review): a `send(EventDataBatch batch)` overload appears to have been elided from this chunk by
    // extraction — `sendInternal(Flux<EventDataBatch>)` below references it via `this::send`.

    /**
     * Validates that partition key and partition id are not both set, then collects the event stream into a single
     * {@link EventDataBatch} (sized to the link) and sends it.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     *
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // Fall back to the transport maximum when the link reports a non-positive size.
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: everything must fit into a single batch or the collector throws.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch; errors are logged and still propagated to the subscriber.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // Entity path is the Event Hub itself for round-robin sends, or "{hub}/Partitions/{id}" for a partition send.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    // Random link name: "EC"-prefixed for the Event Hub-level sender, "PS"-prefixed for a partition sender.
    private String getLinkName(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? StringUtil.getRandomString("EC")
            : StringUtil.getRandomString("PS");
    }

    // Returns the cached send link for the entity path, creating (and caching) one on first use.
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final AmqpSendLink openLink = openLinks.get(entityPath);

        if (openLink != null) {
            return Mono.just(openLink);
        } else {
            return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions)
                .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link));
        }
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed. Idempotent: subsequent calls are no-ops.
     */
    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        openLinks.forEach((key, value) -> value.close());
        openLinks.clear();

        if (!isSharedConnection) {
            connection.close();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null}
     * then it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code
     * maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;

        // The batch currently being filled; nulled out by finisher().
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }

                // The current batch is full: fail if the batch quota is already used up, otherwise roll over.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT,
                        maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }

                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider);
                // NOTE(review): the return value of this tryAdd is ignored — presumably tryAdd itself rejects an
                // event larger than a whole batch; verify against EventDataBatch's contract.
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                EventDataBatch batch = currentBatch;
                currentBatch = null;

                if (batch != null) {
                    list.add(batch);
                }

                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            // No CONCURRENT/UNORDERED/IDENTITY_FINISH characteristics: collection is sequential and ordered.
            return Collections.emptySet();
        }
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. 
*/ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. 
* * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> connection.createSendLink(linkName, entityPath, retryOptions)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (!isSharedConnection) { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { 
existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Should this add the attribute if the parsed name is `""`? It appears from the policy that it doesn't.
public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); if (span.isRecording()) { span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(HttpTraceUtil.parseNamespaceProvider(spanName))); } return context.addData(PARENT_SPAN_KEY, span); }
AttributeValue.stringAttributeValue(HttpTraceUtil.parseNamespaceProvider(spanName)));
public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); if (span.isRecording()) { String tracingNamespace = getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, null, String.class); if (tracingNamespace != null) { span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace)); } } return context.addData(PARENT_SPAN_KEY, span); }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { return Context.NONE; } span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.warning("Failed to set span attribute since 
value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } @Override public void addLink(Context context) { final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { logger.warning("Failed to find spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context)); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. 
* @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and trace-parent of the * current span. 
*/ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name i.e spanName = "EventHubs.send" * @return The component name contained in the context i.e "eventhubs" */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String AZ_NAMESPACE_KEY = "az.namespace"; static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { return Context.NONE; } span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if 
(CoreUtils.isNullOrEmpty(value)) { logger.warning("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } @Override public void addLink(Context context) { final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { logger.warning("Failed to find spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context)); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. 
* @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and trace-parent of the * current span. 
*/ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name i.e spanName = "EventHubs.send" * @return The component name contained in the context i.e "eventhubs" */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
Updated it to omit the span attribute if the namespace does not exist.
public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); if (span.isRecording()) { span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(HttpTraceUtil.parseNamespaceProvider(spanName))); } return context.addData(PARENT_SPAN_KEY, span); }
AttributeValue.stringAttributeValue(HttpTraceUtil.parseNamespaceProvider(spanName)));
public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Builder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); if (span.isRecording()) { String tracingNamespace = getOrDefault(context, AZ_TRACING_NAMESPACE_KEY, null, String.class); if (tracingNamespace != null) { span.setAttribute(AZ_NAMESPACE_KEY, AttributeValue.stringAttributeValue(tracingNamespace)); } } return context.addData(PARENT_SPAN_KEY, span); }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { return Context.NONE; } span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.warning("Failed to set span attribute since 
value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } @Override public void addLink(Context context) { final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { logger.warning("Failed to find spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context)); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. 
* @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and trace-parent of the * current span. 
*/ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name i.e spanName = "EventHubs.send" * @return The component name contained in the context i.e "eventhubs" */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenTelemetryTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = OpenTelemetry.getTracerFactory().get("Azure-OpenTelemetry"); static final String AZ_NAMESPACE_KEY = "az.namespace"; static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenTelemetryTracer.class); /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; Builder spanBuilder; switch (processKind) { case SEND: spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { return Context.NONE; } span = spanBuilder.setSpanKind(Span.Kind.PRODUCER).startSpan(); if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.isRecording()) { span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if 
(CoreUtils.isNullOrEmpty(value)) { logger.warning("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.setAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.isRecording()) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } @Override public void addLink(Context context) { final Builder spanBuilder = getOrDefault(context, SPAN_BUILDER_KEY, null, Builder.class); if (spanBuilder == null) { logger.warning("Failed to find spanBuilder to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } spanBuilder.addLink(spanContext); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } @Override public Context getSharedSpanBuilder(String spanName, Context context) { return context.addData(SPAN_BUILDER_KEY, getSpanBuilder(spanName, context)); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. * <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. 
* @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { Builder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } if (span.isRecording()) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link Builder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private static Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { Builder spanBuilder = TRACER.spanBuilder(spanName).setParent(spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and trace-parent of the * current span. 
*/ private static Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.setAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.setAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.setAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); } /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name i.e spanName = "EventHubs.send" * @return The component name contained in the context i.e "eventhubs" */ private static String parseComponentValue(String spanName) { if (spanName != null && !spanName.isEmpty()) { int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameEndIndex != -1) { return spanName.substring(0, componentNameEndIndex); } } return ""; } /** * Returns a {@link Builder} to create and start a new child {@link Span} with parent being * the designated {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@code Span.Builder} to create and start a new {@code Span}. 
*/ private Builder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilder(spanNameKey).setParent(parentSpan); } /** * Returns the value of the specified key from the context. * * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
According to the design spec, null should throw NullPointerException and isEmpty should throw IllegalArgumentException. I'd split this into two steps. https://azure.github.io/azure-sdk/java_implementation.html#java-errors-system-errors
public ConfigurationClientBuilder connectionString(String connectionString) { if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be null or an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
if (CoreUtils.isNullOrEmpty(connectionString)) {
public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret contained within the connection string is invalid and cannot instantiate the HMAC-SHA256" + " algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} is null or an empty string. Or the secret is * invalid and cannot instantiate HMAC-SHA256 MAC algorithm. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. 
* @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. 
* @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code connectionString} is {@code null}. * @throws IllegalArgumentException if {@code connectionString} is an empty string, the {@code connectionString} * secret is invalid, or the HMAC-SHA256 MAC algorithm cannot be instantiated. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. 
* * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. 
* * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
You can replace this with connectionString.isEmpty(). No need to do another null check in CoreUtils.
public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
if (CoreUtils.isNullOrEmpty(connectionString)) {
public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret contained within the connection string is invalid and cannot instantiate the HMAC-SHA256" + " algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code connectionString} is {@code null}. * @throws IllegalArgumentException if {@code connectionString} is an empty string. Or the secret is invalid and * cannot instantiate HMAC-SHA256 MAC algorithm. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. 
* @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. 
* * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code connectionString} is {@code null}. * @throws IllegalArgumentException if {@code connectionString} is an empty string, the {@code connectionString} * secret is invalid, or the HMAC-SHA256 MAC algorithm cannot be instantiated. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. 
* * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. 
* * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
Should we add in that the secret contained within the connection string is invalid? Right now this message is too vague, and it is thrown from a black-box scenario from the perspective of anyone consuming this SDK.
public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
"The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err));
public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError( new IllegalArgumentException("'connectionString' cannot be an empty string.")); } try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret contained within the connection string is invalid and cannot instantiate the HMAC-SHA256" + " algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code connectionString} is {@code null}. * @throws IllegalArgumentException if {@code connectionString} is an empty string. Or the secret is invalid and * cannot instantiate HMAC-SHA256 MAC algorithm. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. 
* @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. 
* * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-data-appconfiguration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final Map<String, String> properties; private ConfigurationClientCredentials credential; private TokenCredential tokenCredential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationServiceVersion serviceVersion = version != null ? 
version : ConfigurationServiceVersion.getLatest(); String buildEndpoint = endpoint; if (tokenCredential == null) { buildEndpoint = getBuildEndpoint(); } Objects.requireNonNull(buildEndpoint, "'Endpoint' is required and can not be null."); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add( new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", buildEndpoint))); } else if (credential != null) { policies.add(new ConfigurationCredentialsPolicy(credential)); } else { logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. 
* @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code connectionString} is {@code null}. * @throws IllegalArgumentException if {@code connectionString} is an empty string, the {@code connectionString} * secret is invalid, or the HMAC-SHA256 MAC algorithm cannot be instantiated. */ /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential TokenCredential used to authenticate HTTP requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder credential(TokenCredential tokenCredential) { Objects.requireNonNull(tokenCredential); this.tokenCredential = tokenCredential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. 
* * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. 
* * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private String getBuildEndpoint() { if (endpoint != null) { return endpoint; } else if (credential != null) { return credential.getBaseUri(); } else { return null; } } }
Does the order of `subscribeOn()` matter?
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
}).subscribeOn(scheduler)
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main method to demonstrate starting and stopping a {@link EventProcessorClient}. * * @param args The input arguments to this executable. * @throws Exception If there are any errors while running the {@link EventProcessorClient}. */ public static void main(String[] args) throws Exception { final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL); final EventProcessorClient client = new EventProcessorClientBuilder() .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME) .connectionString(EH_CONNECTION_STRING) .processPartitionInitialization(context -> aggregator.onInitialize(context)) .processPartitionClose(context -> aggregator.onClose(context)) .processEvent(event -> aggregator.onEvent(event)) .processError(error -> aggregator.onError(error)) .checkpointStore(new InMemoryCheckpointStore()) .buildEventProcessorClient(); System.out.println("Starting event processor"); final AtomicBoolean isRunning = new AtomicBoolean(true); client.start(); generateEvents(isRunning).subscribe(); System.out.println("Sleeping..."); Thread.sleep(TimeUnit.SECONDS.toMillis(30)); isRunning.set(false); System.out.println("Stopping event processor"); client.stop(); System.out.println("Exiting process"); } /** * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service. */ }
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main method to demonstrate starting and stopping a {@link EventProcessorClient}. * * @param args The input arguments to this executable. * @throws Exception If there are any errors while running the {@link EventProcessorClient}. */ public static void main(String[] args) throws Exception { final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL); final EventProcessorClient client = new EventProcessorClientBuilder() .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME) .connectionString(EH_CONNECTION_STRING) .processPartitionInitialization(context -> aggregator.onInitialize(context)) .processPartitionClose(context -> aggregator.onClose(context)) .processEvent(event -> aggregator.onEvent(event)) .processError(error -> aggregator.onError(error)) .checkpointStore(new InMemoryCheckpointStore()) .buildEventProcessorClient(); System.out.println("Starting event processor"); final AtomicBoolean isRunning = new AtomicBoolean(true); client.start(); generateEvents(isRunning).subscribe(); System.out.println("Sleeping..."); Thread.sleep(TimeUnit.SECONDS.toMillis(30)); isRunning.set(false); System.out.println("Stopping event processor"); client.stop(); System.out.println("Exiting process"); } /** * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service. */ }
According to this, order doesn't matter. https://stackoverflow.com/a/37986481/4220757
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
}).subscribeOn(scheduler)
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new Random(); final EventHubProducerAsyncClient client = new EventHubClientBuilder() .connectionString(EH_CONNECTION_STRING) .buildAsyncProducerClient(); return Mono.<Void>fromRunnable(() -> { while (isRunning.get()) { int milliseconds = random.nextInt(1000); try { TimeUnit.MILLISECONDS.sleep(milliseconds); } catch (InterruptedException ignored) { } final String machineId = machineIds[random.nextInt(machineIds.length)]; final int temperature = Math.abs(random.nextInt() % 101); logger.info("[{}] Temperature: {}C", machineId, temperature); final EventData event = new EventData(String.valueOf(temperature)); final CreateBatchOptions batchOptions = new CreateBatchOptions().setPartitionKey(machineId); client.createBatch(batchOptions).flatMap(batch -> { batch.tryAdd(event); return client.send(batch); }).block(operationTimeout); } }).subscribeOn(scheduler) .doFinally(signal -> { logger.info("Disposing of producer."); client.close(); }); }
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main method to demonstrate starting and stopping a {@link EventProcessorClient}. * * @param args The input arguments to this executable. * @throws Exception If there are any errors while running the {@link EventProcessorClient}. */ public static void main(String[] args) throws Exception { final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL); final EventProcessorClient client = new EventProcessorClientBuilder() .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME) .connectionString(EH_CONNECTION_STRING) .processPartitionInitialization(context -> aggregator.onInitialize(context)) .processPartitionClose(context -> aggregator.onClose(context)) .processEvent(event -> aggregator.onEvent(event)) .processError(error -> aggregator.onError(error)) .checkpointStore(new InMemoryCheckpointStore()) .buildEventProcessorClient(); System.out.println("Starting event processor"); final AtomicBoolean isRunning = new AtomicBoolean(true); client.start(); generateEvents(isRunning).subscribe(); System.out.println("Sleeping..."); Thread.sleep(TimeUnit.SECONDS.toMillis(30)); isRunning.set(false); System.out.println("Stopping event processor"); client.stop(); System.out.println("Exiting process"); } /** * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service. */ }
// Sample: runs an EventProcessorClient alongside an in-process event producer,
// aggregating machine temperature events (via MachineEventsProcessor) for ~30 seconds.
class EventProcessorClientAggregateEventsSample {
    // Interval at which the aggregator reports its results.
    private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5);
    // Placeholder connection string; replace {endpoint}, {sharedAccessKeyName},
    // {sharedAccessKey} and {eventHubName} with real values before running.
    private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};"
        + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}";

    /**
     * Main method to demonstrate starting and stopping a {@link EventProcessorClient}.
     *
     * @param args The input arguments to this executable.
     * @throws Exception If there are any errors while running the {@link EventProcessorClient}.
     */
    public static void main(String[] args) throws Exception {
        final MachineEventsProcessor aggregator = new MachineEventsProcessor(REPORTING_INTERVAL);

        // Wire every processor callback to the aggregator; checkpoints are kept in memory,
        // so positions are lost when the process exits.
        final EventProcessorClient client = new EventProcessorClientBuilder()
            .consumerGroup(EventHubClientBuilder.DEFAULT_CONSUMER_GROUP_NAME)
            .connectionString(EH_CONNECTION_STRING)
            .processPartitionInitialization(context -> aggregator.onInitialize(context))
            .processPartitionClose(context -> aggregator.onClose(context))
            .processEvent(event -> aggregator.onEvent(event))
            .processError(error -> aggregator.onError(error))
            .checkpointStore(new InMemoryCheckpointStore())
            .buildEventProcessorClient();

        System.out.println("Starting event processor");

        final AtomicBoolean isRunning = new AtomicBoolean(true);
        client.start();

        // Producer runs asynchronously on its own scheduler.
        // NOTE(review): generateEvents is defined outside this class body in this snippet —
        // confirm it is in scope in the full source.
        generateEvents(isRunning).subscribe();

        System.out.println("Sleeping...");
        // Let producer and processor run for 30 seconds, then signal the producer to stop.
        Thread.sleep(TimeUnit.SECONDS.toMillis(30));
        isRunning.set(false);

        System.out.println("Stopping event processor");
        client.stop();
        System.out.println("Exiting process");
    }

    /**
     * Helper method that generates events for machines "2A", "9B", and "6C" and sends them to the service.
     */
}
Usually errors are logged with the following pattern:
```
logger.error("Error while parsing diagnostics", e)
```
Is there any reason we are doing something different here? Passing the exception as the last argument lets the logger record the full stack trace.
/**
 * Serializes the client-side request statistics into a JSON diagnostics string.
 *
 * @return the diagnostics as JSON, or an empty string if serialization fails.
 */
public String toString() {
    try {
        return objectMapper.writeValueAsString(this.clientSideRequestStatistics);
    } catch (JsonProcessingException e) {
        // Pass the exception as the last SLF4J argument so the full stack trace is
        // logged, instead of concatenating only the message into the log line.
        logger.error("Error while parsing diagnostics", e);
    }
    return StringUtils.EMPTY;
}
logger.error("Error while parsing diagnostics " + e.getOriginalMessage());
/**
 * Serializes the client-side request statistics into a JSON diagnostics string.
 *
 * @return the diagnostics as JSON, or an empty string if serialization fails.
 */
public String toString() {
    try {
        return objectMapper.writeValueAsString(this.clientSideRequestStatistics);
    } catch (JsonProcessingException e) {
        // Concatenating the exception ("... " + e) only logs its toString(); passing it
        // as the last SLF4J argument records the full stack trace as well.
        logger.error("Error while parsing diagnostics", e);
    }
    return StringUtils.EMPTY;
}
// Diagnostics wrapper exposing client-side request statistics for a Cosmos response.
class CosmosResponseDiagnostics {
    private static final Logger logger = LoggerFactory.getLogger(CosmosResponseDiagnostics.class);
    // Shared Jackson mapper used to serialize the statistics to JSON.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private ClientSideRequestStatistics clientSideRequestStatistics;

    CosmosResponseDiagnostics() {
        this.clientSideRequestStatistics = new ClientSideRequestStatistics();
    }

    // Package-private accessor for the underlying statistics.
    ClientSideRequestStatistics clientSideRequestStatistics() {
        return clientSideRequestStatistics;
    }

    // Package-private fluent setter; returns this for chaining.
    CosmosResponseDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
        this.clientSideRequestStatistics = clientSideRequestStatistics;
        return this;
    }

    /**
     * Retrieves Response Diagnostic String
     *
     * @return Response Diagnostic String
     */
    @Override
    // NOTE(review): the toString() body this @Override and javadoc belong to appears to be
    // elided from this snippet — confirm against the full source.
    /**
     * Retrieves latency related to the completion of the request
     *
     * @return request completion latency
     */
    public Duration getRequestLatency() {
        return this.clientSideRequestStatistics.getRequestLatency();
    }
}
// Diagnostics wrapper exposing client-side request statistics for a Cosmos response.
class CosmosResponseDiagnostics {
    private static final Logger logger = LoggerFactory.getLogger(CosmosResponseDiagnostics.class);
    // Shared Jackson mapper used to serialize the statistics to JSON.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private ClientSideRequestStatistics clientSideRequestStatistics;

    CosmosResponseDiagnostics() {
        this.clientSideRequestStatistics = new ClientSideRequestStatistics();
    }

    // Package-private accessor for the underlying statistics.
    ClientSideRequestStatistics clientSideRequestStatistics() {
        return clientSideRequestStatistics;
    }

    // Package-private fluent setter; returns this for chaining.
    CosmosResponseDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
        this.clientSideRequestStatistics = clientSideRequestStatistics;
        return this;
    }

    /**
     * Retrieves Response Diagnostic String
     *
     * @return Response Diagnostic String
     */
    @Override
    // NOTE(review): the toString() body this @Override and javadoc belong to appears to be
    // elided from this snippet — confirm against the full source.
    /**
     * Retrieves latency related to the completion of the request
     *
     * @return request completion latency
     */
    public Duration getRequestLatency() {
        return this.clientSideRequestStatistics.getRequestLatency();
    }
}
Same question I raised offline: I see you have a different way of capturing retry info in direct mode, yet here on the outer surface of the SDK we capture the retry info again. Why do we need to capture it in two different places? If this is only applicable to gateway mode, it should live in GatewayStoreModel, no? The same applies to all the other HttpMethods.
/**
 * Dispatches an HTTP GET request to the appropriate store proxy, recording retry
 * context on the request when this is a retry attempt.
 *
 * @param request the service request to send.
 * @param documentClientRetryPolicy retry policy tracking the attempt count and timing.
 * @return the response stream from the store proxy.
 */
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.GET);
    // On a retry (retryCount > 0), stamp the end time and copy the retry context onto
    // the request so it can be surfaced in diagnostics.
    // NOTE(review): retry info is reportedly also captured in direct mode — confirm
    // whether this outer capture is needed only for gateway mode (and if so, whether
    // it belongs in GatewayStoreModel instead).
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy);
    }
    return getStoreProxy(request).processMessage(request);
}
request.requestContext.updateRetryContext(documentClientRetryPolicy);
/**
 * Dispatches an HTTP GET request to the appropriate store proxy, recording retry
 * context on the request when this is a retry attempt.
 *
 * @param request the service request to send.
 * @param documentClientRetryPolicy retry policy tracking the attempt count and timing.
 * @return the response stream from the store proxy.
 */
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.GET);
    // On a retry (retryCount > 0), stamp the end time and copy the retry context onto
    // the request so it can be surfaced in diagnostics.
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        // The second argument's meaning is not visible in this snippet — presumably a
        // flag distinguishing this call site; TODO confirm against updateRetryContext's
        // signature.
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block(); } public void init() { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } } private void initializeDirectConnectivity() { this.storeClientFactory = new StoreClientFactory( this.configs, this.connectionPolicy.getRequestTimeoutInMillis() / 1000, 0, this.userAgentContainer ); this.addressResolver = new GlobalAddressResolver( this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public 
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), 
retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, FeedOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(Object[] objectArray) { String[] stringArray = new String[objectArray.length]; for (int i = 0; i < objectArray.length; 
++i) { Object object = objectArray[i]; if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } if (options.getAccessCondition() != null) { if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition()); } else { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition()); } } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if 
(options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.isPopulateQuotaInfo()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, 
RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsString != null) { CosmosItemProperties cosmosItemProperties; if (objectDoc instanceof CosmosItemProperties) { cosmosItemProperties = (CosmosItemProperties) objectDoc; } else { cosmosItemProperties = new CosmosItemProperties(contentAsString); } partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition); } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, escapeNonAscii(partitionKeyInternal.toJson())); } private static String escapeNonAscii(String partitionKeyJson) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < partitionKeyJson.length(); i++) { int val = partitionKeyJson.charAt(i); if (val > 127) { sb.append("\\u").append(String.format("%04X", val)); } else { sb.append(partitionKeyJson.charAt(i)); } } return sb.toString(); 
} private static PartitionKeyInternal extractPartitionKeyValueFromDocument( CosmosItemProperties document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = document.getObjectByPath(parts); if (value == null || value.getClass() == ObjectNode.class) { value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } String content = toJsonString(document, mapper); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null || this.cosmosKeyCredential != null) { String resourceName = 
request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, String requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.tokenResolver != null) { return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? 
Collections.unmodifiableMap(properties) : null); } else if (cosmosKeyCredential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { try { return CosmosResourceType.valueOf(resourceType.toString()); } catch (IllegalArgumentException e) { return CosmosResourceType.System; } } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); RxStoreModel storeProxy = this.getStoreProxy(request); if(request.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); request.requestContext.updateRetryContext(retryPolicy); } return storeProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); Map<String, String> headers = request.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); 
request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.PUT); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } @Override public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, final boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Flux<RxDocumentServiceResponse> responseObservable = requestObs .flux() .flatMap(req -> { if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(req); } return create(req, requestRetryPolicy); }); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); Flux<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return upsert(req, retryPolicyInstance);}); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), finalRequestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override 
public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return this.delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return 
RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink, final ChangeFeedOptions changeFeedOptions) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new 
IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads a stored procedure by link, retried under a fresh session-token-reset retry policy. */ @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } /* Worker: validates the link, issues a Read request for ResourceType.StoredProcedure; synchronous failures surface as Flux.error. */ private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the stored-procedure feed of a collection. */ @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } /* String-query overload delegates to the SqlQuerySpec overload. */ @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } /* Convenience overload: executes with no explicit RequestOptions. */ @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } /* Worker: serializes procedureParams into the request body, sends an ExecuteJavaScript request with a JSON Accept header, and captures the returned session token. NOTE(review): the flatMap lambda uses the outer 'request' rather than its 'req' parameter — presumably addPartitionKeyInformation mutates and returns the same instance; confirm. */ private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Creates a trigger in the given collection. */ @Override public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Upserts a trigger in the given collection. */ @Override public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Shared builder: validates inputs and assembles a Trigger request for Create/Upsert. */ private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } /* Replaces a trigger addressed by its self link. */ @Override public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Deletes a trigger by link. */ @Override public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads a trigger by link. */ @Override public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the trigger feed of a collection. */ @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } /* Creates a user-defined function in the given collection. */ @Override public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { 
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Upserts a user-defined function in the given collection. */ @Override public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Replaces a user-defined function addressed by its self link. */ @Override public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Deletes a user-defined function by link. */ @Override public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads a user-defined function by link. */ @Override public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the UDF feed of a collection. */ @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } /* Reads a conflict by link. */ @Override public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); /* Partition-key info must be resolved before dispatch; NOTE(review): lambda uses outer 'request' not 'req' — presumably the same mutated instance, confirm. */ Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the conflict feed of a collection. */ @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } /* Deletes a conflict by link. */ @Override public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> 
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Creates a user in the given database. */ @Override public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Upserts a user in the given database. */ @Override public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Shared builder: validates inputs and assembles a User request for Create/Upsert. */ private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } /* Replaces a user addressed by its self link. */ @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Deletes a user by link. NOTE(review): unlike sibling methods this lacks @Override — confirm whether the interface declares deleteUser. */ public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads a user by link. */ @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Upserts a permission under a user. */ @Override public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Shared builder: validates inputs and assembles a Permission request for Create/Upsert. */ private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } /* Replaces a permission addressed by its self link. */ @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Deletes a permission by link. */ @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads a permission by link. */ @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the permission feed of a user. */ @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } /* Replaces an offer (throughput) resource addressed by its self link. */ @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); /* Offer requests carry no custom headers or options (nulls passed through). */ RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads an offer by link. */ @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
    // Reads the account-wide DatabaseAccount resource, routed through the
    // session-token-reset retry policy.
    @Override
    public Flux<DatabaseAccount> getDatabaseAccount() {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
    }

    // Issues the account read ("" path targets the account root) and maps the raw
    // service response to a DatabaseAccount; synchronous failures become Flux.error.
    private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Getting Database Account");
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                    ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null);
            return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
        } catch (Exception e) {
            logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    // Exposes the session container as an opaque Object; callers cast as needed.
    public Object getSession() {
        return this.sessionContainer;
    }

    // NOTE(review): unchecked downcast — the argument must actually be a SessionContainer
    // or this throws ClassCastException at runtime.
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }

    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }

    // Reads the DatabaseAccount from one specific endpoint (endpoint override set on the
    // request, always via the gateway proxy) and refreshes the multiple-write-locations
    // flag from the returned account.
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                    ResourceType.DatabaseAccount, "", null, (Object) null);
            this.populateHeaders(request, HttpConstants.HttpMethods.GET);

            request.setEndpointOverride(endpoint);
            return this.gatewayProxy.processMessage(request).doOnError(e -> {
                String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null ? e.getCause().toString() : e.toString());
                logger.warn(message);
            }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount -> {
                        this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
                    });
        });
    }

    /**
     * Certain requests must be routed through gateway even when the client connectivity mode is direct.
     *
     * @param request the request about to be dispatched
     * @return RxStoreModel — either the gateway proxy or the direct store model
     */
    private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
        // Per-request override always forces gateway routing.
        if (request.UseGatewayMode) {
            return this.gatewayProxy;
        }

        ResourceType resourceType = request.getResourceType();
        OperationType operationType = request.getOperationType();

        // Offers, partition key ranges, and script CRUD (everything except script execution)
        // always go through the gateway.
        if (resourceType == ResourceType.Offer ||
            resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
            resourceType == ResourceType.PartitionKeyRange) {
            return this.gatewayProxy;
        }

        if (operationType == OperationType.Create
                || operationType == OperationType.Upsert) {
            // Administrative resource writes go through the gateway; data writes go direct.
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection ||
                resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Delete) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Replace) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Read) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else {
            // Queries over collection children without an explicit partition key range
            // identity are routed through the gateway; everything else goes direct.
            if ((request.getOperationType() == OperationType.Query ||
                 request.getOperationType() == OperationType.SqlQuery) &&
                    Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null) {
                    return this.gatewayProxy;
                }
            }

            return this.storeModel;
        }
    }

    // Releases client-owned resources: endpoint manager, store client factory, and the
    // HTTP client. Shutdown failures are logged, not rethrown.
    @Override
    public void close() {
        logger.info("Shutting down ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        try {
            this.reactorHttpClient.shutdown();
        } catch (Exception e) {
            logger.warn("Failure in shutting down reactorHttpClient", e);
        }
    }
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block(); } public void init() { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } } private void initializeDirectConnectivity() { this.storeClientFactory = new StoreClientFactory( this.configs, this.connectionPolicy.getRequestTimeoutInMillis() / 1000, 0, this.userAgentContainer ); this.addressResolver = new GlobalAddressResolver( this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public 
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. 
id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, 
Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance,RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public 
Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
    String path = Utils.joinPath(collectionLink, null);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
        ResourceType.DocumentCollection, path, requestHeaders, options);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(retryPolicyInstance, request), retryPolicyInstance);
}

// One read attempt per retry; maps the raw service response to a typed resource response.
private Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) {
    try {
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

// Lists all collections under a database via the collections feed path.
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}

// String-query overload delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, FeedOptions options) {
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection);
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String
databaseLink, SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}

/**
 * Serializes stored-procedure arguments into a JSON array string.
 * JsonSerializable values use their own toJson(); everything else goes through the shared mapper.
 *
 * @throws IllegalArgumentException if a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] stringArray = new String[objectArray.length];
    for (int i = 0; i < objectArray.length; ++i) {
        Object object = objectArray[i];
        if (object instanceof JsonSerializable) {
            stringArray[i] = ((JsonSerializable) object).toJson();
        } else {
            try {
                stringArray[i] = mapper.writeValueAsString(object);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(stringArray, ","));
}

/**
 * Rejects resource ids containing characters that are illegal in Cosmos DB resource ids
 * ('/', '\', '?', '#') or ending with a space.
 * FIX: the '#' comparison was truncated/garbled in the previous revision
 * ("indexOf(' throw ..."); restored to indexOf('#') != -1 per the documented id restrictions.
 */
private static void validateResource(Resource resource) {
    if (!StringUtils.isEmpty(resource.getId())) {
        if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1
                || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) {
            throw new IllegalArgumentException("Id contains illegal chars.");
        }
        if (resource.getId().endsWith(" ")) {
            throw new IllegalArgumentException("Id ends with a space.");
        }
    }
}

/**
 * Builds the per-request header map from client defaults plus the supplied RequestOptions.
 * Custom headers from options are applied first, then well-known options override/augment them.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    // Client-level consistency default; may be overridden by options below.
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH,
options.getAccessCondition().getCondition());
        }
    }
    // Per-request consistency override.
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post triggers are sent as comma-joined lists.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.isPopulateQuotaInfo()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    return headers;
}

// Resolves the target collection, then stamps the partition-key header onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString,
    Document document, RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
this.collectionCache.resolveCollectionAsync(request);
    return collectionObs
        .map(collectionValueHolder -> {
            // Mutates `request` in place with the PK header, then re-emits the same request.
            addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
            return request;
        });
}

// Overload that reuses a caller-supplied collection resolution instead of resolving again.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, RequestOptions options,
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v);
        return request;
    });
}

/**
 * Computes the effective partition key for the operation and writes it into the request headers.
 * Precedence: explicit PartitionKey.NONE in options > explicit PK in options >
 * un-partitioned collection (empty PK) > PK extracted from the document body.
 *
 * @throws UnsupportedOperationException when the collection is partitioned but no PK can be determined.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Un-partitioned (fixed) collection: empty partition key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsString != null) {
        // Extract the PK value from the document body itself.
        CosmosItemProperties cosmosItemProperties;
        if (objectDoc instanceof CosmosItemProperties) {
            cosmosItemProperties = (CosmosItemProperties) objectDoc;
        } else {
            cosmosItemProperties = new CosmosItemProperties(contentAsString);
        }
        partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition);
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY,
escapeNonAscii(partitionKeyInternal.toJson()));
}

/**
 * Replaces every character above U+007F with its \\uXXXX escape so the PK header stays ASCII-safe.
 * NOTE(review): operates on UTF-16 code units (charAt), not code points — supplementary characters
 * are escaped as surrogate pairs, which appears to be the intended wire format.
 */
private static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < partitionKeyJson.length(); i++) {
        int val = partitionKeyJson.charAt(i);
        if (val > 127) {
            sb.append("\\u").append(String.format("%04X", val));
        } else {
            sb.append(partitionKeyJson.charAt(i));
        }
    }
    return sb.toString();
}

/**
 * Reads the partition-key value out of the document via the first PK path.
 * Missing values (or ObjectNode placeholders) resolve to the "none" partition key.
 * Returns null when the definition has no usable path.
 */
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    CosmosItemProperties document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        // Only the first PK path is consulted (single-path partition keys).
        String path = partitionKeyDefinition.getPaths().iterator().next();
        List<String> parts = PathParser.getPathParts(path);
        if (parts.size() >= 1) {
            Object value = document.getObjectByPath(parts);
            if (value == null || value.getClass() == ObjectNode.class) {
                value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            }
            if (value instanceof PartitionKeyInternal) {
                return (PartitionKeyInternal) value;
            } else {
                return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
            }
        }
    }
    return null;
}

// Builds a create/upsert request for a document: serializes the body, resolves the collection,
// and attaches partition-key information asynchronously.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    String content = toJsonString(document, mapper);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path,
        requestHeaders, options, content);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
    return
addPartitionKeyInformation(request, content, document, options, collectionObs);
}

/**
 * Stamps the mandatory wire headers on a request: x-date, authorization (when any credential
 * source is configured), and JSON content-type/accept defaults.
 */
private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.tokenResolver != null || this.cosmosKeyCredential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // Token must be URL-encoded before being placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    // Bodies (POST/PUT) default to JSON content-type unless the caller set one.
    if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
}

/**
 * Resolves the authorization token for a request, in precedence order:
 * custom token resolver > key credential > single master/resource token > per-resource token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
    ResourceType resourceType,
    String requestVerb,
    Map<String, String> headers,
    AuthorizationTokenType tokenType,
    Map<String, Object> properties) {

    if (this.tokenResolver != null) {
        return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType),
            properties != null ?
Collections.unmodifiableMap(properties) : null);
    } else if (cosmosKeyCredential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // Single resource token configured: used verbatim for every request.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account reads fall back to the first token obtained from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}

// Maps the internal ResourceType enum onto the public CosmosResourceType; unknown values become System.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    try {
        return CosmosResourceType.valueOf(resourceType.toString());
    } catch (IllegalArgumentException e) {
        return CosmosResourceType.System;
    }
}

// Records the response's session token against the request's target for session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}

// Low-level POST create: headers, retry bookkeeping, then dispatch via the resolved store proxy.
private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.POST);
    RxStoreModel storeProxy = this.getStoreProxy(request);
    if(request.requestContext != null && retryPolicy.getRetryCount() > 0) {
        retryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(retryPolicy, true);
    }
    return storeProxy.processMessage(request);
}

// Low-level upsert: POST with the is-upsert header set; captures the session token on response.
private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.POST);
    Map<String, String> headers = request.getHeaders();
    // populateHeaders guarantees a header map exists on the request.
    assert (headers != null);
    headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request)
        .map(response -> {
            // Upserts advance the session token just like queries do.
            this.captureSessionToken(request, response);
            return response;
        }
    );
}

// Low-level PUT replace: headers, retry bookkeeping, then dispatch to the store proxy.
private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.PUT);
    if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}

/**
 * Creates a document. When no partition key is supplied, the retry policy is wrapped with a
 * PartitionKeyMismatchRetryPolicy so a stale collection cache can be refreshed and retried.
 */
@Override
public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy for use inside the lambda below.
    DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy;
    logger.debug("Creating a Document.
collectionLink: [{}]", collectionLink);
    Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options,
        disableAutomaticIdGeneration, OperationType.Create);
    return requestObs.flux()
        .flatMap(req -> {
            return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(documentClientRetryPolicy, req), documentClientRetryPolicy);
        });
}

// One create attempt per retry; maps the raw response to a typed Document response.
private Flux<ResourceResponse<Document>> createDocumentInternal(DocumentClientRetryPolicy requestRetryPolicy, RxDocumentServiceRequest request) {
    try {
        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }
        Flux<RxDocumentServiceResponse> responseObservable = create(request, requestRetryPolicy);
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

/**
 * Upserts a document; mirrors createDocument including the PartitionKeyMismatchRetryPolicy
 * wrapping when no partition key is supplied.
 */
@Override
public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
    RequestOptions options, boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    if (options == null || options.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    logger.debug("Upserting a Document.
collectionLink: [{}]", collectionLink); DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); return reqObs.flatMap(req -> { return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(finalRetryPolicyInstance, req), finalRetryPolicyInstance); }); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Flux<RxDocumentServiceResponse> responseObservable = upsert(request, retryPolicyInstance); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(typedDocument, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(typedDocument, options, content, finalRequestRetryPolicy, request), finalRequestRetryPolicy); } @Override public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", document.getSelfLink()); final String path = Utils.joinPath(document.getSelfLink(), null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, content, finalRequestRetryPolicy, request), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, content, retryPolicyInstance, request); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } 
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
    String path = Utils.joinPath(documentLink, null);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document,
        path, requestHeaders, options);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(options, requestRetryPolicy, request), requestRetryPolicy);
}

// One delete attempt per retry: resolves the collection, stamps the PK header
// (null content/document — PK must come from options or the collection), then issues DELETE.
private Flux<ResourceResponse<Document>> deleteDocumentInternal(RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) {
    try {
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flux().flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Flux.error(e);
    }
}

/**
 * Reads a single document addressed by link.
 *
 * @throws IllegalArgumentException if documentLink is empty.
 */
@Override
public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    if (StringUtils.isEmpty(documentLink)) {
        throw new IllegalArgumentException("documentLink");
    }
    logger.debug("Reading a Document.
documentLink: [{}]", documentLink);
    String path = Utils.joinPath(documentLink, null);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document,
        path, requestHeaders, options);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(options, retryPolicyInstance, request), retryPolicyInstance);
}

// One read attempt per retry: resolve collection, stamp PK header, then GET.
// NOTE(review): the lambda closes over `request` rather than using `req` — same object here
// (addPartitionKeyInformation re-emits the mutated request), but worth confirming on change.
private Flux<ResourceResponse<Document>> readDocumentInternal(RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) {
    try {
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flux().flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Flux.error(e);
    }
}

// Reading "all documents" is implemented as a SELECT * query over the collection.
@Override
public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", options);
}

@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), options);
}

// Adapter exposing this client's caches/consistency/query machinery to the query pipeline.
private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return
RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            return RxDocumentClientImpl.this.query(request).single();
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        // Not implemented for this adapter; returns null by design here.
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            return null;
        }
    };
}

@Override
public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
}

// Change feed is served by a dedicated query implementation over the collection.
@Override
public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink, final ChangeFeedOptions changeFeedOptions) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document,
        Document.class, collectionLink, changeFeedOptions);
    return changeFeedQueryImpl.executeAsync();
}

@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

// Builds a stored-procedure request under the collection's sprocs path after validating the id.
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure,
        path, storedProcedure, requestHeaders, options);
    return request;
}

// Builds a user-defined-function request; mirrors getStoredProcedureRequest for UDFs.
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction,
        path, udf, requestHeaders, options);
    return request;
}

@Override
public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}

private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String
collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // One create attempt per retry; request construction failures become an error Flux.
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

@Override
public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}

// One upsert attempt per retry for stored procedures.
private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a StoredProcedure.
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

@Override
public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}

// One replace attempt per retry: validates the sproc, builds a PUT on its self-link.
private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure.
storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

@Override
public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}

// One delete attempt per retry for stored procedures.
private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure.
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { 
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> 
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
@Override public Flux<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response)); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); this.populateHeaders(request, HttpConstants.HttpMethods.GET); request.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(request).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? 
e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> { this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if 
(request.getPartitionKeyRangeIdentity() == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Shutting down ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); LifeCycleUtils.closeQuietly(this.storeClientFactory); try { this.reactorHttpClient.shutdown(); } catch (Exception e) { logger.warn("Failure in shutting down reactorHttpClient", e); } } }
no specific reason , just trying to avoid priting whole error stack . I will move to logger.error("Error while parsing diagnostics", e) in next iteration
public String toString() { try { return objectMapper.writeValueAsString(this.clientSideRequestStatistics); }catch (JsonProcessingException e) { logger.error("Error while parsing diagnostics " + e.getOriginalMessage()); } return StringUtils.EMPTY; }
logger.error("Error while parsing diagnostics " + e.getOriginalMessage());
public String toString() { try { return objectMapper.writeValueAsString(this.clientSideRequestStatistics); }catch (JsonProcessingException e) { logger.error("Error while parsing diagnostics " + e); } return StringUtils.EMPTY; }
class CosmosResponseDiagnostics { private static final Logger logger = LoggerFactory.getLogger(CosmosResponseDiagnostics.class); private static final ObjectMapper objectMapper = new ObjectMapper(); private ClientSideRequestStatistics clientSideRequestStatistics; CosmosResponseDiagnostics() { this.clientSideRequestStatistics = new ClientSideRequestStatistics(); } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosResponseDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override /** * Retrieves latency related to the completion of the request * * @return request completion latency */ public Duration getRequestLatency() { return this.clientSideRequestStatistics.getRequestLatency(); } }
class CosmosResponseDiagnostics { private static final Logger logger = LoggerFactory.getLogger(CosmosResponseDiagnostics.class); private static final ObjectMapper objectMapper = new ObjectMapper(); private ClientSideRequestStatistics clientSideRequestStatistics; CosmosResponseDiagnostics() { this.clientSideRequestStatistics = new ClientSideRequestStatistics(); } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosResponseDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override /** * Retrieves latency related to the completion of the request * * @return request completion latency */ public Duration getRequestLatency() { return this.clientSideRequestStatistics.getRequestLatency(); } }
Yes above is applicable to GW mode , and retry in gateway happens via BackoffRetry.executeRetry and at that place we dont have the access to DocumentServiceRequestContext
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.GET); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); }
request.requestContext.updateRetryContext(documentClientRetryPolicy);
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.GET); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block(); } public void init() { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } } private void initializeDirectConnectivity() { this.storeClientFactory = new StoreClientFactory( this.configs, this.connectionPolicy.getRequestTimeoutInMillis() / 1000, 0, this.userAgentContainer ); this.addressResolver = new GlobalAddressResolver( this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public 
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), 
retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, FeedOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(Object[] objectArray) { String[] stringArray = new String[objectArray.length]; for (int i = 0; i < objectArray.length; 
++i) { Object object = objectArray[i]; if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } if (options.getAccessCondition() != null) { if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition()); } else { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition()); } } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if 
(options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.isPopulateQuotaInfo()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, 
RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsString != null) { CosmosItemProperties cosmosItemProperties; if (objectDoc instanceof CosmosItemProperties) { cosmosItemProperties = (CosmosItemProperties) objectDoc; } else { cosmosItemProperties = new CosmosItemProperties(contentAsString); } partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition); } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, escapeNonAscii(partitionKeyInternal.toJson())); } private static String escapeNonAscii(String partitionKeyJson) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < partitionKeyJson.length(); i++) { int val = partitionKeyJson.charAt(i); if (val > 127) { sb.append("\\u").append(String.format("%04X", val)); } else { sb.append(partitionKeyJson.charAt(i)); } } return sb.toString(); 
} private static PartitionKeyInternal extractPartitionKeyValueFromDocument( CosmosItemProperties document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = document.getObjectByPath(parts); if (value == null || value.getClass() == ObjectNode.class) { value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } String content = toJsonString(document, mapper); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null || this.cosmosKeyCredential != null) { String resourceName = 
request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, String requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.tokenResolver != null) { return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? 
Collections.unmodifiableMap(properties) : null); } else if (cosmosKeyCredential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { try { return CosmosResourceType.valueOf(resourceType.toString()); } catch (IllegalArgumentException e) { return CosmosResourceType.System; } } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); RxStoreModel storeProxy = this.getStoreProxy(request); if(request.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); request.requestContext.updateRetryContext(retryPolicy); } return storeProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); Map<String, String> headers = request.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); 
request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.PUT); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } @Override public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, final boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Flux<RxDocumentServiceResponse> responseObservable = requestObs .flux() .flatMap(req -> { if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(req); } return create(req, requestRetryPolicy); }); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); Flux<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return upsert(req, retryPolicyInstance);}); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), finalRequestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override 
public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return this.delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return 
RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // consistency level requested by the caller when this client was built
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                return RxDocumentClientImpl.this.query(request).single();
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // NOTE(review): not implemented by this adapter — returns null; confirm the
                // query pipeline never invokes it on this client.
                return null;
            }
        };
    }

    @Override
    public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) {
        return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document);
    }

    /**
     * Starts a change-feed query against the given collection.
     */
    @Override
    public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink,
            final ChangeFeedOptions changeFeedOptions) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this,
            ResourceType.Document, Document.class, collectionLink, changeFeedOptions);
        return changeFeedQueryImpl.executeAsync();
    }

    /**
     * Reads the partition key ranges of a collection as a feed.
     */
    @Override
    public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    // Validates arguments and assembles a stored-procedure service request for the
    // given operation type.
    private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
            RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        validateResource(storedProcedure);

        String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        return request;
    }

    // Validates arguments and assembles a user-defined-function service request for the
    // given operation type.
    private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
            RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        validateResource(udf);

        String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);

        return request;
    }

    @Override
    public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
            StoredProcedure storedProcedure, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
            StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId());
            RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Create);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.create(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink,
            StoredProcedure storedProcedure, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Builds and dispatches the Upsert request for a stored procedure.
    private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink,
            StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId());
            RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.upsert(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
            RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Replaces a stored procedure via its self link.
    private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (storedProcedure == null) {
                throw new IllegalArgumentException("storedProcedure");
            }
            logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId());

            RxDocumentClientImpl.validateResource(storedProcedure);

            String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
            RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Deletes the stored procedure addressed by storedProcedureLink.
    private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }

            logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId());
            RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
                OperationType.Create);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.create(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger,
            RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Builds and dispatches the Upsert request for a trigger.
    private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId());
            RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
                OperationType.Upsert);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.upsert(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    // Validates arguments and assembles a trigger service request for the given operation type.
    private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger,
            RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }

        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
            ResourceType.Trigger, path, trigger, requestHeaders, options);

        return request;
    }

    @Override
    public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceTriggerInternal(trigger, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Replaces a trigger via its self link.
    private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (trigger == null) {
                throw new IllegalArgumentException("trigger");
            }

            logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId());
            RxDocumentClientImpl.validateResource(trigger);

            String path = Utils.joinPath(trigger.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace,
                ResourceType.Trigger, path, trigger, requestHeaders, options);

            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Deletes the trigger addressed by triggerLink.
    private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }

            logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
                ResourceType.Trigger, path, requestHeaders, options);

            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readTriggerInternal(triggerLink, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Reads the trigger addressed by triggerLink.
    private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }

            logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                ResourceType.Trigger, path, requestHeaders, options);

            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    /**
     * Reads all triggers of a collection as a feed.
     */
    @Override
    public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return readFeed(options, ResourceType.Trigger, Trigger.class,
            Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) {
        return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
            FeedOptions options) {
        return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
            UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Builds and dispatches the Create request for a user-defined function.
    private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
            UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
                OperationType.Create);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.create(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink,
            UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Builds and dispatches the Upsert request for a user-defined function.
    private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink,
            UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
                OperationType.Upsert);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.upsert(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
            RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Replaces a user-defined function via its self link.
    private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (udf == null) {
                throw new IllegalArgumentException("udf");
            }

            logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
/* NOTE(review): continuation of the readConflictInternal(...) parameter list opened on the previous line. */ DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); /* Partition-key information is resolved asynchronously and attached to the request before dispatch. */ Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { /* NOTE(review): lambda parameter 'req' is ignored; the outer 'request' is used for both the retry hook and the read — confirm this aliasing is intentional. */ if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Reads the conflict feed under a collection; validates the link, then delegates to the generic readFeed helper. */ @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } /* Raw-SQL-string overload; wraps the query and delegates to the SqlQuerySpec overload. */ @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } /* Deletes a conflict resource; runs under a fresh session-token-reset retry policy. */ @Override public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() ->
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
/* NOTE(review): continuation of readFeedCollectionChild's executeFunc — the collection is resolved first, then partition-key info is attached before issuing the feed read. */ Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } /* Generic paged ReadFeed over an arbitrary resource link. A maxItemCount of null maps to -1 (service-chosen page size); the continuation token is threaded between pages via the CONTINUATION request header. */ private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; /* Each page request runs under its own session-token-reset retry policy. */ Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } /* Offer queries have no parent resource link — createQuery is called with a null link. */ @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
/* Reads the database-account metadata (empty resource path) under a session-token-reset retry policy. */ @Override public Flux<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response)); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Flux.error(e); } } /* Raw session-container accessor; returned as Object, callers cast. */ public Object getSession() { return this.sessionContainer; } /* NOTE(review): unchecked downcast to SessionContainer — a foreign Object argument will throw ClassCastException here. */ public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } /* Reads the database account directly from a specific endpoint: the request's endpoint is overridden and the call always goes through the gateway proxy, bypassing normal endpoint selection. */ public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); this.populateHeaders(request, HttpConstants.HttpMethods.GET); request.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(request).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ?
e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> { this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if 
/* NOTE(review): condition of the trailing 'if' opened on the previous line — queries without a resolved partition key range identity are routed through the gateway. */ (request.getPartitionKeyRangeIdentity() == null) { return this.gatewayProxy; } } return this.storeModel; } } /* Shuts down long-lived client resources. Endpoint manager and store client factory are closed quietly; a failure while shutting down the HTTP client is logged and swallowed so close() never throws. */ @Override public void close() { logger.info("Shutting down ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); LifeCycleUtils.closeQuietly(this.storeClientFactory); try { this.reactorHttpClient.shutdown(); } catch (Exception e) { logger.warn("Failure in shutting down reactorHttpClient", e); } } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
        && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
// Seed endpoint/location state from the account just read; block() is acceptable because this
// runs once during client initialization, before any request traffic.
this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block();
}

// Completes client construction: builds the gateway proxy, initializes the endpoint manager and
// gateway configuration reader, creates the collection / partition-key-range caches, and selects
// the store model (gateway vs. direct) from the configured connection mode.
// NOTE(review): call order matters — the gateway proxy must exist before the configuration reader
// and collection cache are built.
public void init() {
    this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel,
            this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager,
            this.reactorHttpClient);
    this.globalEndpointManager.init();
    this.initializeGatewayConfigurationReader();
    this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy,
            this, this.retryPolicy);
    // Rebuilt here (not in the constructor) because it needs the collection cache.
    this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer,
            this.collectionCache, this.retryPolicy);
    this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
        this.storeModel = this.gatewayProxy;
    } else {
        this.initializeDirectConnectivity();
    }
}

// Sets up direct-mode connectivity: the store client factory, the global address resolver, and
// (via createStoreModel) the server store model.
private void initializeDirectConnectivity() {
    this.storeClientFactory = new StoreClientFactory(
        this.configs,
        // NOTE(review): request timeout is converted millis -> seconds by integer division;
        // values under 1000 ms truncate to 0 — confirm this is intended.
        this.connectionPolicy.getRequestTimeoutInMillis() / 1000,
        0,
        this.userAgentContainer
    );
    this.addressResolver = new GlobalAddressResolver(
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy);
    this.createStoreModel(true);
}

// Adapter exposing only the endpoint / database-account / connection-policy accessors that the
// GlobalEndpointManager needs, without leaking the full client.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }

        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }

        @Override
        public
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. 
id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, 
Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance,RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public 
Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, FeedOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String 
databaseLink, SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection);
}

/**
 * Serializes stored-procedure arguments into a single JSON array literal, e.g. {@code [arg0,arg1]}.
 * {@code JsonSerializable} arguments use their own {@code toJson()}; everything else goes through
 * the shared Jackson mapper.
 *
 * @param objectArray the procedure arguments to serialize; must not be null.
 * @return the arguments rendered as a JSON array string.
 * @throws IllegalArgumentException if an argument cannot be serialized to JSON.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] stringArray = new String[objectArray.length];
    for (int i = 0; i < objectArray.length; ++i) {
        Object object = objectArray[i];
        if (object instanceof JsonSerializable) {
            stringArray[i] = ((JsonSerializable) object).toJson();
        } else {
            try {
                // Plain POJOs / primitives are serialized with the shared mapper.
                stringArray[i] = mapper.writeValueAsString(object);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(stringArray, ","));
}

/**
 * Validates a user-supplied resource id: it must not contain '/', '\', '?' or '#'
 * (all meaningful inside resource links/URIs) and must not end with a space.
 * An empty id is accepted here.
 *
 * @param resource the resource whose id is validated.
 * @throws IllegalArgumentException if the id contains an illegal character or ends with a space.
 */
private static void validateResource(Resource resource) {
    if (!StringUtils.isEmpty(resource.getId())) {
        // FIX(review): the '#' comparison was truncated in this source
        // ("indexOf(' throw ..."); restored the check so ids containing '#'
        // are rejected like the other link-delimiter characters.
        if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1
                || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) {
            throw new IllegalArgumentException("Id contains illegal chars.");
        }
        if (resource.getId().endsWith(" ")) {
            throw new IllegalArgumentException("Id ends with a space.");
        }
    }
}

/**
 * Builds the per-request HTTP headers from client-level state (multi-write, consistency level)
 * and the supplied RequestOptions (access conditions, triggers, session token, offer settings...).
 *
 * @param options request options; may be null, in which case only client-level headers are set.
 * @return a mutable map of header name to value.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    // Custom headers first, so well-known options below take precedence on key collisions.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH,
options.getAccessCondition().getCondition()); } } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.isPopulateQuotaInfo()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
this.collectionCache.resolveCollectionAsync(request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsString != null) { CosmosItemProperties cosmosItemProperties; if (objectDoc instanceof CosmosItemProperties) { cosmosItemProperties = (CosmosItemProperties) objectDoc; } else { cosmosItemProperties = new CosmosItemProperties(contentAsString); } partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition); } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, 
escapeNonAscii(partitionKeyInternal.toJson())); } private static String escapeNonAscii(String partitionKeyJson) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < partitionKeyJson.length(); i++) { int val = partitionKeyJson.charAt(i); if (val > 127) { sb.append("\\u").append(String.format("%04X", val)); } else { sb.append(partitionKeyJson.charAt(i)); } } return sb.toString(); } private static PartitionKeyInternal extractPartitionKeyValueFromDocument( CosmosItemProperties document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = document.getObjectByPath(parts); if (value == null || value.getClass() == ObjectNode.class) { value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } String content = toJsonString(document, mapper); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return 
addPartitionKeyInformation(request, content, document, options, collectionObs); } private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null || this.cosmosKeyCredential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, String requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.tokenResolver != null) { return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? 
Collections.unmodifiableMap(properties) : null); } else if (cosmosKeyCredential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { try { return CosmosResourceType.valueOf(resourceType.toString()); } catch (IllegalArgumentException e) { return CosmosResourceType.System; } } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); RxStoreModel storeProxy = this.getStoreProxy(request); if(request.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); request.requestContext.updateRetryContext(retryPolicy, true); } return storeProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); Map<String, String> headers = request.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); 
request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.PUT); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); } @Override public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy; logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); return requestObs.flux() .flatMap(req -> { return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(documentClientRetryPolicy, req), documentClientRetryPolicy); }); } private Flux<ResourceResponse<Document>> createDocumentInternal(DocumentClientRetryPolicy requestRetryPolicy, RxDocumentServiceRequest request) { try { if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } Flux<RxDocumentServiceResponse> responseObservable = create(request, requestRetryPolicy); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); return reqObs.flatMap(req -> { return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(finalRetryPolicyInstance, req), finalRetryPolicyInstance); }); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Flux<RxDocumentServiceResponse> responseObservable = upsert(request, retryPolicyInstance); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(typedDocument, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(typedDocument, options, content, finalRequestRetryPolicy, request), finalRequestRetryPolicy); } @Override public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", document.getSelfLink()); final String path = Utils.joinPath(document.getSelfLink(), null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, content, finalRequestRetryPolicy, request), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, content, retryPolicyInstance, request); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } 
logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document, path, requestHeaders, options); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(options, requestRetryPolicy, request), requestRetryPolicy); } private Flux<ResourceResponse<Document>> deleteDocumentInternal(RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return this.delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(options, retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Document>> readDocumentInternal(RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return 
RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink, final ChangeFeedOptions changeFeedOptions) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, 
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String 
collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId());
            RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    // Builds the service request shared by the create/upsert trigger paths.
    // Validates arguments and the trigger resource before constructing the request.
    private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                       OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, trigger, requestHeaders, options);
        return request;
    }

    // Public replace API: delegates to the internal method under a fresh session-token-reset retry policy.
    @Override
    public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Replaces an existing trigger addressed by its self link; errors surface as a failed Flux.
    private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (trigger == null) {
                throw new IllegalArgumentException("trigger");
            }
            logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
            RxDocumentClientImpl.validateResource(trigger);

            String path = Utils.joinPath(trigger.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Deletes the trigger addressed by triggerLink.
    private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }
            logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Reads a single trigger by link.
    private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }
            logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    // Feed read over all triggers of a collection.
    @Override
    public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return readFeed(options, ResourceType.Trigger, Trigger.class,
                Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) {
        return queryTriggers(collectionLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) {
        return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger);
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Upserts a UDF into the collection (create-or-replace semantics on the service side).
    private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Replaces an existing UDF addressed by its self link.
    private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (udf == null) {
                throw new IllegalArgumentException("udf");
            }
            logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
            validateResource(udf);

            String path = Utils.joinPath(udf.getSelfLink(), null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    // Feed read over all UDFs of a collection.
    @Override
    public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
                Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) {
        return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) {
        return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
    }

    @Override
    public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> 
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
/**
 * Reads the {@link DatabaseAccount} for this client's service endpoint.
 * The read is wrapped in the session-token-reset retry policy so transient
 * failures are retried with the same semantics as other metadata reads.
 */
@Override
public Flux<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}

// Builds and issues the actual DatabaseAccount read request; synchronous failures
// (e.g. during request construction) are surfaced as an error Flux instead of thrown.
private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount,
            "", // resourceAddress: the account read targets the service root
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response));
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

// Exposes the session container as an opaque Object (interface-mandated signature).
public Object getSession() {
    return this.sessionContainer;
}

// Counterpart setter; the argument must actually be a SessionContainer or the cast throws.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}

public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}

/**
 * Reads the {@link DatabaseAccount} from a specific regional endpoint (used by the
 * global endpoint manager during location discovery/refresh). Request creation is
 * deferred so each subscription builds a fresh request. On success this also updates
 * {@code useMultipleWriteLocations} from the returned account capabilities.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
            ResourceType.DatabaseAccount, "", null, (Object) null);
        this.populateHeaders(request, HttpConstants.HttpMethods.GET);
        // Force the request to the supplied regional endpoint rather than the default.
        request.setEndpointOverride(endpoint);
        return this.gatewayProxy.processMessage(request).doOnError(e -> {
            String message = String.format("Failed to retrieve database account information. %s",
                e.getCause() != null ? e.getCause().toString() : e.toString());
            logger.warn(message);
        }).map(rsp -> rsp.getResource(DatabaseAccount.class))
          .doOnNext(databaseAccount -> {
              // Multi-write is enabled only when both the client policy and the account allow it.
              this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations()
                  && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
          });
    });
}

/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request being dispatched; its resource/operation type decides the route
 * @return the gateway proxy for metadata/control-plane style requests, otherwise the
 *         direct store model ({@code RxStoreModel})
 */
private RxStoreProxy getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit caller override always wins.
    if (request.UseGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // NOTE: '&&' binds tighter than '||' here — scripts go to gateway except for ExecuteJavaScript.
    if (resourceType == ResourceType.Offer ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create || operationType == OperationType.Upsert) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Queries against collection children without a resolved partition key range
        // still need the gateway to do the routing.
        if ((request.getOperationType() == OperationType.Query ||
             request.getOperationType() == OperationType.SqlQuery) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}

// Releases client-owned resources; close failures are logged, not rethrown.
@Override
public void close() {
    logger.info("Shutting down ...");
    LifeCycleUtils.closeQuietly(this.globalEndpointManager);
    LifeCycleUtils.closeQuietly(this.storeClientFactory);
    try {
        this.reactorHttpClient.shutdown();
    } catch (Exception e) {
        logger.warn("Failure in shutting down reactorHttpClient", e);
    }
}
}
Discussed offline - "Let's take the example of ReadDocument(). In Gateway mode, ReadDocument() keeps calling readDocumentInternal() driven by BackOffRetryUtility.executeRetry(); each retry invokes readDocumentInternal() again and therefore executes request.requestContext.updateRetryContext(documentClientRetryPolicy) every time. However, in Direct mode ReadDocument() calls readDocumentInternal() only once: on a direct-transport exception (Gone), instead of re-invoking readDocumentInternal() through BackOffRetryUtility.executeRetry(), the replicated-resource client creates its own retry policy and consumes the failure/success itself, and the response is eventually sent back to ReadDocument() via the StoreClient. Therefore the call request.requestContext.updateRetryContext(documentClientRetryPolicy) in read() in RxDocumentClientImpl will never be executed again."
/**
 * Dispatches a GET read through the appropriate store proxy (gateway or direct).
 *
 * If this invocation is itself a retry (retry count > 0), the retry policy's end
 * time is stamped and the request's retry context is refreshed before sending.
 * Note: in Direct mode transport-level retries (e.g. Gone) are handled inside the
 * StoreClient with its own policy, so this method is NOT re-entered for them —
 * the context update here only reflects retries driven at this layer. The extra
 * {@code true} argument flags that this update happens on the send path
 * (presumably "before sending" — confirm against RequestContext.updateRetryContext).
 *
 * @param request                   the service request to send; headers are populated here
 * @param documentClientRetryPolicy the retry policy governing this operation
 * @return the raw service response stream
 */
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.GET);
    if (request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
request.requestContext.updateRetryContext(documentClientRetryPolicy);
/**
 * Dispatches a GET read through the appropriate store proxy (gateway or direct).
 *
 * When this invocation is itself a retry (retry count > 0), the retry policy's end
 * time is stamped and the request's retry context is refreshed before sending.
 * Per the offline discussion: in Direct mode, transport retries (Gone) happen inside
 * the StoreClient with its own policy, so this method is not re-entered for them.
 * The second argument {@code true} presumably marks this as a send-path context
 * update — TODO confirm against RequestContext.updateRetryContext's signature.
 *
 * @param request                   the service request to send; headers are populated here
 * @param documentClientRetryPolicy the retry policy governing this operation
 * @return the raw service response stream
 */
private Flux<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    populateHeaders(request, HttpConstants.HttpMethods.GET);
    if (request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) {
        documentClientRetryPolicy.updateEndTime();
        request.requestContext.updateRetryContext(documentClientRetryPolicy, true);
    }
    return getStoreProxy(request).processMessage(request);
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block(); } public void init() { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } } private void initializeDirectConnectivity() { this.storeClientFactory = new StoreClientFactory( this.configs, this.connectionPolicy.getRequestTimeoutInMillis() / 1000, 0, this.userAgentContainer ); this.addressResolver = new GlobalAddressResolver( this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public 
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), 
retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, FeedOptions options) { return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(Object[] objectArray) { String[] stringArray = new String[objectArray.length]; for (int i = 0; i < objectArray.length; 
++i) { Object object = objectArray[i]; if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } if (options.getAccessCondition() != null) { if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition()); } else { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getAccessCondition().getCondition()); } } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if 
(options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.isPopulateQuotaInfo()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, 
RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsString != null) { CosmosItemProperties cosmosItemProperties; if (objectDoc instanceof CosmosItemProperties) { cosmosItemProperties = (CosmosItemProperties) objectDoc; } else { cosmosItemProperties = new CosmosItemProperties(contentAsString); } partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition); } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, escapeNonAscii(partitionKeyInternal.toJson())); } private static String escapeNonAscii(String partitionKeyJson) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < partitionKeyJson.length(); i++) { int val = partitionKeyJson.charAt(i); if (val > 127) { sb.append("\\u").append(String.format("%04X", val)); } else { sb.append(partitionKeyJson.charAt(i)); } } return sb.toString(); 
} private static PartitionKeyInternal extractPartitionKeyValueFromDocument( CosmosItemProperties document, PartitionKeyDefinition partitionKeyDefinition) { if (partitionKeyDefinition != null) { String path = partitionKeyDefinition.getPaths().iterator().next(); List<String> parts = PathParser.getPathParts(path); if (parts.size() >= 1) { Object value = document.getObjectByPath(parts); if (value == null || value.getClass() == ObjectNode.class) { value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } if (value instanceof PartitionKeyInternal) { return (PartitionKeyInternal) value; } else { return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false); } } } return null; } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } String content = toJsonString(document, mapper); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null || this.cosmosKeyCredential != null) { String resourceName = 
request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, String requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.tokenResolver != null) { return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? 
Collections.unmodifiableMap(properties) : null); } else if (cosmosKeyCredential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { try { return CosmosResourceType.valueOf(resourceType.toString()); } catch (IllegalArgumentException e) { return CosmosResourceType.System; } } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); RxStoreModel storeProxy = this.getStoreProxy(request); if(request.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); request.requestContext.updateRetryContext(retryPolicy); } return storeProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); Map<String, String> headers = request.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); 
request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.PUT); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy); } return getStoreProxy(request).processMessage(request); } @Override public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Document>> createDocumentInternal(String collectionLink, Object document, RequestOptions options, final boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy) { try { logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); Flux<RxDocumentServiceResponse> responseObservable = requestObs .flux() .flatMap(req -> { if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(req); } return create(req, requestRetryPolicy); }); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(collectionLink, document, options, disableAutomaticIdGeneration, finalRetryPolicyInstance), requestRetryPolicy); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); Flux<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return upsert(req, retryPolicyInstance);}); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(documentLink, document, options, finalRequestRetryPolicy), finalRequestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal(documentLink, typedDocument, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override 
public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, finalRequestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, retryPolicyInstance); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(documentLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<Document>> deleteDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return this.delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(documentLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, path, requestHeaders, options); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return 
RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink, final ChangeFeedOptions changeFeedOptions) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new 
IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> create(request, retryPolicy) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { 
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) { return queryConflicts(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict); } @Override public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> 
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux(); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
@Override public Flux<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response)); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); this.populateHeaders(request, HttpConstants.HttpMethods.GET); request.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(request).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? 
e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> { this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if 
(request.getPartitionKeyRangeIdentity() == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Shutting down ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); LifeCycleUtils.closeQuietly(this.storeClientFactory); try { this.reactorHttpClient.shutdown(); } catch (Exception e) { logger.warn("Failure in shutting down reactorHttpClient", e); } } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider { private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private CosmosKeyCredential cosmosKeyCredential; private TokenResolver tokenResolver; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. 
*/ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final HttpClient reactorHttpClient; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, TokenResolver tokenResolver, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); this.tokenResolver = tokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { this(serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, cosmosKeyCredential); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length <= 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if 
(partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosKeyCredential cosmosKeyCredential) { logger.info( "Initializing DocumentClient with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.cosmosKeyCredential = cosmosKeyCredential; if (this.cosmosKeyCredential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; } else if(masterKeyOrResourceToken != null && 
!ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)){ this.cosmosKeyCredential = new CosmosKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.cosmosKeyCredential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(); } this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost()); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; } private void initializeGatewayConfigurationReader() { String resourceToken; if(this.tokenResolver != null) { resourceToken = this.tokenResolver.getAuthorizationToken("GET", "", CosmosResourceType.System, null); } else if(!this.hasAuthKeyResourceToken && this.authorizationTokenProvider == null) { resourceToken = this.firstResourceTokenFromPermissionFeed; } else { assert this.masterKeyOrResourceToken != null || this.cosmosKeyCredential != null; resourceToken = this.masterKeyOrResourceToken; } this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.serviceEndpoint, this.hasAuthKeyResourceToken, resourceToken, this.connectionPolicy, this.authorizationTokenProvider, this.reactorHttpClient); DatabaseAccount databaseAccount = this.gatewayConfigurationReader.initializeReaderAsync().block(); 
this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); this.globalEndpointManager.refreshLocationAsync(databaseAccount, false).block(); } public void init() { this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); this.collectionCache = new RxClientCollectionCache(this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } } private void initializeDirectConnectivity() { this.storeClientFactory = new StoreClientFactory( this.configs, this.connectionPolicy.getRequestTimeoutInMillis() / 1000, 0, this.userAgentContainer ); this.addressResolver = new GlobalAddressResolver( this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public 
ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { return new RxGatewayStoreModel(sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeoutInMillis(this.connectionPolicy.getIdleConnectionTimeoutInMillis()) .withPoolSize(this.connectionPolicy.getMaxPoolSize()) .withHttpProxy(this.connectionPolicy.getProxy()) .withRequestTimeoutInMillis(this.connectionPolicy.getRequestTimeoutInMillis()); return HttpClient.createFixed(httpClientConfig); } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient( this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, false ); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public URI getWriteEndpoint() { return globalEndpointManager.getWriteEndpoints().stream().findFirst().orElse(null); } @Override public URI getReadEndpoint() { return globalEndpointManager.getReadEndpoints().stream().findFirst().orElse(null); } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public Flux<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. 
id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, database, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> createDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> deleteDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. 
databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Database, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Database>> readDatabaseInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(FeedOptions options) { return readFeed(options, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResouceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResouceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResouceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResouceLink, Paths.USERS_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResouceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResouceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResouceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResouceLink, 
Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResouceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private <T extends Resource> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, FeedOptions options, Class<T> klass, ResourceType resourceTypeEnum) { String queryResourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); UUID activityId = Utils.randomUUID(); IDocumentQueryClient queryClient = DocumentQueryClientImpl(RxDocumentClientImpl.this); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createDocumentQueryExecutionContextAsync(queryClient, resourceTypeEnum, klass, sqlQuery , options, queryResourceLink, false, activityId); return executionContext.flatMap(IDocumentQueryExecutionContext::executeAsync); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, FeedOptions options) { return queryDatabases(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(Paths.DATABASES_ROOT, querySpec, options, Database.class, ResourceType.Database); } @Override public Flux<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. 
databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Create, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> createCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.DocumentCollection, path, collection, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<DocumentCollection>> deleteCollectionInternal(DocumentClientRetryPolicy retryPolicyInstance,RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Flux.error(e); } } private Flux<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.DELETE); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); } Flux<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.GET); return gatewayProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { populateHeaders(request, HttpConstants.HttpMethods.POST); return this.getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } @Override public 
Flux<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
    String path = Utils.joinPath(collectionLink, null);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
        ResourceType.DocumentCollection, path, requestHeaders, options);
    // A fresh session-token retry policy per logical operation; the same instance is reused
    // across retries of this request so it can accumulate retry state.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(retryPolicyInstance, request), retryPolicyInstance);
}

// Performs the actual read; kept separate so ObservableHelper can re-invoke it on retry.
// Synchronous failures (e.g. from onBeforeSendRequest) are converted to an error Flux so
// callers always observe errors through the reactive channel.
private Flux<ResourceResponse<DocumentCollection>> readCollectionInternal(
    DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) {
    try {
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Flux.error(e);
    }
}

@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, FeedOptions options) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Feed read over all collections under the database link.
    return readFeed(options, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    FeedOptions options) {
    // Convenience overload: wraps the raw query text into a SqlQuerySpec.
    return createQuery(databaseLink, new SqlQuerySpec(query), options, DocumentCollection.class,
        ResourceType.DocumentCollection);
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String
databaseLink, SqlQuerySpec querySpec, FeedOptions options) {
    return createQuery(databaseLink, querySpec, options, DocumentCollection.class,
        ResourceType.DocumentCollection);
}

/**
 * Serializes stored-procedure arguments into a single JSON array literal
 * (e.g. {@code [1,"a",{"k":1}]}) suitable for the request body.
 *
 * @param objectArray the procedure parameters; each element is either a
 *                    {@link JsonSerializable} (rendered via its own toJson)
 *                    or an arbitrary POJO serialized with the shared mapper.
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON.
 */
private static String serializeProcedureParams(Object[] objectArray) {
    String[] stringArray = new String[objectArray.length];
    for (int i = 0; i < objectArray.length; ++i) {
        Object object = objectArray[i];
        if (object instanceof JsonSerializable) {
            stringArray[i] = ((JsonSerializable) object).toJson();
        } else {
            try {
                stringArray[i] = mapper.writeValueAsString(object);
            } catch (IOException e) {
                // Preserve the Jackson failure as the cause for diagnosability.
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(stringArray, ","));
}

/**
 * Validates that a resource id is legal for use in a resource link.
 * Cosmos DB resource ids may not contain '/', '\', '?' or '#', and may not
 * end with a space.
 *
 * @throws IllegalArgumentException if the id violates either rule.
 */
private static void validateResource(Resource resource) {
    if (!StringUtils.isEmpty(resource.getId())) {
        // FIX: the '#' clause was garbled in the source ("indexOf(' throw ...");
        // restored the full illegal-character set: '/', '\', '?', '#'.
        if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1
                || resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) {
            throw new IllegalArgumentException("Id contains illegal chars.");
        }
        if (resource.getId().endsWith(" ")) {
            throw new IllegalArgumentException("Id ends with a space.");
        }
    }
}

/**
 * Builds the per-request headers implied by client defaults and the given
 * {@link RequestOptions}: tentative-write allowance, consistency level,
 * caller-supplied custom headers, access conditions, triggers, session token,
 * resource-token expiry, offer throughput/type, and quota/script-logging flags.
 * Returns only the client-default headers when {@code options} is null.
 */
private Map<String, String> getRequestHeaders(RequestOptions options) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        return headers;
    }
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        // Custom headers go first so the explicit options below take precedence.
        headers.putAll(customOptions);
    }
    if (options.getAccessCondition() != null) {
        if (options.getAccessCondition().getType() == AccessConditionType.IF_MATCH) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getAccessCondition().getCondition());
        } else {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH,
options.getAccessCondition().getCondition()); } } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.isPopulateQuotaInfo()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } return headers; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
this.collectionCache.resolveCollectionAsync(request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsString, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, String contentAsString, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = BridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsString != null) { CosmosItemProperties cosmosItemProperties; if (objectDoc instanceof CosmosItemProperties) { cosmosItemProperties = (CosmosItemProperties) objectDoc; } else { cosmosItemProperties = new CosmosItemProperties(contentAsString); } partitionKeyInternal = extractPartitionKeyValueFromDocument(cosmosItemProperties, partitionKeyDefinition); } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, 
escapeNonAscii(partitionKeyInternal.toJson()));
}

// Escapes every character above U+007F as a JSON \ uXXXX sequence so the
// partition-key header stays pure ASCII. NOTE(review): chars are taken one
// char at a time, so non-BMP code points are emitted as surrogate-pair escapes
// (valid JSON) rather than a single escape.
private static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < partitionKeyJson.length(); i++) {
        int val = partitionKeyJson.charAt(i);
        if (val > 127) {
            sb.append("\\u").append(String.format("%04X", val));
        } else {
            sb.append(partitionKeyJson.charAt(i));
        }
    }
    return sb.toString();
}

// Extracts the partition-key value from a document by walking the collection's
// first partition-key path. Missing values (null or an ObjectNode placeholder)
// map to the "none" partition key; an already-internal value passes through.
// Returns null when no partition-key definition / path parts are available.
private static PartitionKeyInternal extractPartitionKeyValueFromDocument(
    CosmosItemProperties document,
    PartitionKeyDefinition partitionKeyDefinition) {
    if (partitionKeyDefinition != null) {
        // Only the first path is honored here — single-path partition keys.
        String path = partitionKeyDefinition.getPaths().iterator().next();
        List<String> parts = PathParser.getPathParts(path);
        if (parts.size() >= 1) {
            Object value = document.getObjectByPath(parts);
            if (value == null || value.getClass() == ObjectNode.class) {
                value = BridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            }
            if (value instanceof PartitionKeyInternal) {
                return (PartitionKeyInternal) value;
            } else {
                return PartitionKeyInternal.fromObjectArray(Collections.singletonList(value), false);
            }
        }
    }
    return null;
}

// Builds the create/upsert document request: serializes the document, resolves
// the target collection, and (continued on the next span) attaches partition-key
// information. NOTE(review): disableAutomaticIdGeneration is not referenced in
// this visible fragment — presumably consumed in code outside this view; confirm.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(String documentCollectionLink,
    Object document, RequestOptions options, boolean disableAutomaticIdGeneration,
    OperationType operationType) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    String content = toJsonString(document, mapper);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType,
        ResourceType.Document, path, requestHeaders, options, content);
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(request);
    return
addPartitionKeyInformation(request, content, document, options, collectionObs); } private void populateHeaders(RxDocumentServiceRequest request, String httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.tokenResolver != null || this.cosmosKeyCredential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if ((HttpConstants.HttpMethods.POST.equals(httpMethod) || HttpConstants.HttpMethods.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) { request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); } } @Override public String getUserAuthorizationToken(String resourceName, ResourceType resourceType, String requestVerb, Map<String, String> headers, AuthorizationTokenType tokenType, Map<String, Object> properties) { if (this.tokenResolver != null) { return this.tokenResolver.getAuthorizationToken(requestVerb, resourceName, this.resolveCosmosResourceType(resourceType), properties != null ? 
Collections.unmodifiableMap(properties) : null); } else if (cosmosKeyCredential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { try { return CosmosResourceType.valueOf(resourceType.toString()); } catch (IllegalArgumentException e) { return CosmosResourceType.System; } } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Flux<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy retryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); RxStoreModel storeProxy = this.getStoreProxy(request); if(request.requestContext != null && retryPolicy.getRetryCount() > 0) { retryPolicy.updateEndTime(); request.requestContext.updateRetryContext(retryPolicy, true); } return storeProxy.processMessage(request); } private Flux<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.POST); Map<String, String> headers = request.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); 
request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request) .map(response -> { this.captureSessionToken(request, response); return response; } ); } private Flux<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { populateHeaders(request, HttpConstants.HttpMethods.PUT); if(request.requestContext != null && documentClientRetryPolicy.getRetryCount() > 0) { documentClientRetryPolicy.updateEndTime(); request.requestContext.updateRetryContext(documentClientRetryPolicy, true); } return getStoreProxy(request).processMessage(request); } @Override public Flux<ResourceResponse<Document>> createDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy documentClientRetryPolicy = requestRetryPolicy; logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create); return requestObs.flux() .flatMap(req -> { return ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal(documentClientRetryPolicy, req), documentClientRetryPolicy); }); } private Flux<ResourceResponse<Document>> createDocumentInternal(DocumentClientRetryPolicy requestRetryPolicy, RxDocumentServiceRequest request) { try { if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } Flux<RxDocumentServiceResponse> responseObservable = create(request, requestRetryPolicy); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; Flux<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert).flux(); return reqObs.flatMap(req -> { return ObservableHelper.inlineIfPossibleAsObs(() -> upsertDocumentInternal(finalRetryPolicyInstance, req), finalRetryPolicyInstance); }); } private Flux<ResourceResponse<Document>> upsertDocumentInternal(DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Flux<RxDocumentServiceResponse> responseObservable = upsert(request, retryPolicyInstance); return responseObservable .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(typedDocument, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(typedDocument, options, content, finalRequestRetryPolicy, request), finalRequestRetryPolicy); } @Override public Flux<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", document.getSelfLink()); final String path = Utils.joinPath(document.getSelfLink(), null); final Map<String, String> requestHeaders = getRequestHeaders(options); String content = toJsonString(document, mapper); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceDocumentInternal(document, options, content, finalRequestRetryPolicy, request), requestRetryPolicy); } private Flux<ResourceResponse<Document>> replaceDocumentInternal(Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal(document.getSelfLink(), document, options, content, retryPolicyInstance, request); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Flux.error(e); } } private Flux<ResourceResponse<Document>> replaceDocumentInternal(String documentLink, Document document, RequestOptions options, String content, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class));} ); } @Override public Flux<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } 
logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Document, path, requestHeaders, options); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDocumentInternal(options, requestRetryPolicy, request), requestRetryPolicy); } private Flux<ResourceResponse<Document>> deleteDocumentInternal(RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(req); } return this.delete(req, retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));}); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. 
documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Document, path, requestHeaders, options); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readDocumentInternal(options, retryPolicyInstance, request), retryPolicyInstance); } private Flux<ResourceResponse<Document>> readDocumentInternal(RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, RxDocumentServiceRequest request) { try { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flux().flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); }); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Flux.error(e); } } @Override public Flux<FeedResponse<Document>> readDocuments(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", options); } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, String query, FeedOptions options) { return queryDocuments(collectionLink, new SqlQuerySpec(query), options); } private IDocumentQueryClient DocumentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return 
RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { return RxDocumentClientImpl.this.query(request).single(); } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public Flux<FeedResponse<Document>> queryDocuments(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Document.class, ResourceType.Document); } @Override public Flux<FeedResponse<Document>> queryDocumentChangeFeed(final String collectionLink, final ChangeFeedOptions changeFeedOptions) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } ChangeFeedQueryImpl<Document> changeFeedQueryImpl = new ChangeFeedQueryImpl<Document>(this, ResourceType.Document, Document.class, collectionLink, changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, 
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); return request; } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String 
collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> upsertStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a StoredProcedure. 
collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in upserting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Flux<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, FeedOptions options) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, Object[] procedureParams) { return this.executeStoredProcedure(storedProcedureLink, null, procedureParams); } @Override public Flux<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, Object[] procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return 
ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
    }

    /**
     * Executes a stored procedure by issuing an ExecuteJavaScript request.
     *
     * @param storedProcedureLink link to the stored procedure to execute.
     * @param options             request options; may be null.
     * @param procedureParams     arguments passed to the script; serialized to JSON, or "" when null.
     * @param retryPolicy         retry policy applied to the request.
     * @return a {@link Flux} emitting the stored procedure response, or an error.
     */
    private Flux<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
            RequestOptions options, Object[] procedureParams, DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            // Stored procedure execution returns raw JSON rather than a resource payload.
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ExecuteJavaScript,
                    ResourceType.StoredProcedure, path,
                    procedureParams != null ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                    requestHeaders, options);

            Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux();
            // NOTE(review): the lambda parameter `req` is unused; the outer `request` is forwarded
            // instead — presumably addPartitionKeyInformation mutates and returns the same instance.
            // TODO confirm, otherwise the partition-key-resolved request would be dropped.
            return reqObs.flatMap(req -> create(request, retryPolicy)
                    .map(response -> {
                        // Session token must be captured before mapping to the typed response.
                        this.captureSessionToken(request, response);
                        return toStoredProcedureResponse(response);
                    }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
            RequestOptions options) {
        // Same policy instance is observed by the internal call and drives the retries.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Creates a trigger under the given collection (debug message continues on the next source line).
    private Flux<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> upsertTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> upsertTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Trigger. 
collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in upserting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. 
trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, FeedOptions options) { return queryTriggers(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, Trigger.class, ResourceType.Trigger); } @Override public Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { 
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> upsertUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a UserDefinedFunction. 
collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Upsert); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in upserting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, FeedOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return readFeed(options, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, String query, FeedOptions options) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(String collectionLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(collectionLink, querySpec, options, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Flux<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }

            logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read,
                    ResourceType.Conflict, path, requestHeaders, options);

            Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux();
            return reqObs.flatMap(req -> {
                // NOTE(review): `req` is unused; the outer `request` is forwarded instead —
                // presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }

                return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
            });

        } catch (Exception e) {
            logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    /**
     * Reads the conflict feed of a collection.
     *
     * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
     */
    @Override
    public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, FeedOptions options) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return readFeed(options, ResourceType.Conflict, Conflict.class,
                Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, FeedOptions options) {
        // Delegates to the SqlQuerySpec overload.
        return queryConflicts(collectionLink, new SqlQuerySpec(query), options);
    }

    @Override
    public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
            FeedOptions options) {
        return createQuery(collectionLink, querySpec, options, Conflict.class, ResourceType.Conflict);
    }

    @Override
    public Flux<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
        // Same policy instance is observed by the internal call and drives the retries.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> 
deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    /**
     * Deletes a conflict resource.
     *
     * @throws IllegalArgumentException when {@code conflictLink} is null or empty.
     */
    private Flux<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }

            logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete,
                    ResourceType.Conflict, path, requestHeaders, options);

            Flux<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options).flux();
            return reqObs.flatMap(req -> {
                // NOTE(review): `req` is unused; the outer `request` is forwarded instead —
                // presumably addPartitionKeyInformation mutates and returns the same instance; confirm.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }

                return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
            });

        } catch (Exception e) {
            logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
        // Same policy instance is observed by the internal call and drives the retries.
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
    }

    // Creates a user under the given database (debug message continues on the next source line).
    private Flux<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options,
            DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            // NOTE(review): unlike upsertUserInternal below, the create path never calls
            // retryPolicy.onBeforeSendRequest(request) — confirm this is intentional.
            return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, User.class));

        } catch (Exception e) {
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
        // Same policy instance is observed by the internal call and drives the retries.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Upserts a user under the given database.
    private Flux<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));

        } catch (Exception e) {
            logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    /**
     * Builds a User service request for the given operation type.
     *
     * @throws IllegalArgumentException when {@code databaseLink} is null/empty or {@code user} is null.
     */
    private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
            OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (user == null) {
            throw new IllegalArgumentException("user");
        }

        RxDocumentClientImpl.validateResource(user);

        String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options);
        RxDocumentServiceRequest request = 
RxDocumentServiceRequest.create(operationType, ResourceType.User, path, user, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Flux<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, FeedOptions options) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return readFeed(options, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, FeedOptions options) { return queryUsers(databaseLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(databaseLink, querySpec, options, User.class, ResourceType.User); } @Override public Flux<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy()); } private Flux<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId());
            RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
                    OperationType.Create);
            // NOTE(review): unlike upsertPermissionInternal below, the create path never calls
            // onBeforeSendRequest on the retry policy — confirm this is intentional.
            return this.create(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Permission.class));

        } catch (Exception e) {
            logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
            return Flux.error(e);
        }
    }

    @Override
    public Flux<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
            RequestOptions options) {
        // Same policy instance is observed by the internal call and drives the retries.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy();
        return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Upserts a permission under the given user (debug message continues on the next source line).
    private Flux<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(operationType, ResourceType.Permission, path, permission, requestHeaders, options); return request; } @Override public Flux<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, FeedOptions options) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return readFeed(options, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, FeedOptions options) { return queryPermissions(userLink, new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, FeedOptions options) { return createQuery(userLink, querySpec, options, Permission.class, ResourceType.Permission); } @Override public Flux<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. 
offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Flux<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Flux.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(FeedOptions options) { return readFeed(options, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T extends Resource> Flux<FeedResponse<T>> readFeedCollectionChild(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; RequestOptions requestOptions = new RequestOptions(); requestOptions.setPartitionKey(options.partitionKey()); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(request); 
Mono<RxDocumentServiceRequest> requestObs = this.addPartitionKeyInformation(request, null, null, requestOptions, collectionObs); return requestObs.flux().flatMap(req -> this.readFeed(req) .map(response -> toFeedResponsePage(response, klass))); }, this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } private <T extends Resource> Flux<FeedResponse<T>> readFeed(FeedOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { if (options == null) { options = new FeedOptions(); } int maxPageSize = options.maxItemCount() != null ? options.maxItemCount() : -1; final FeedOptions finalFeedOptions = options; BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, finalFeedOptions); return request; }; Function<RxDocumentServiceRequest, Flux<FeedResponse<T>>> executeFunc = request -> { return ObservableHelper.inlineIfPossibleAsObs(() -> readFeed(request).map(response -> toFeedResponsePage(response, klass)), this.resetSessionTokenRetryPolicy.getRequestPolicy()); }; return Paginator.getPaginatedQueryResultAsObservable(options, createRequestFunc, executeFunc, klass, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, FeedOptions options) { return queryOffers(new SqlQuerySpec(query), options); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, FeedOptions options) { return createQuery(null, querySpec, options, Offer.class, ResourceType.Offer); } 
@Override public Flux<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Flux<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(response -> toDatabaseAccount(response)); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Flux.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); this.populateHeaders(request, HttpConstants.HttpMethods.GET); request.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(request).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? 
e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> { this.useMultipleWriteLocations = this.connectionPolicy.getUsingMultipleWriteLocations() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. * * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.UseGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((request.getOperationType() == OperationType.Query || request.getOperationType() == OperationType.SqlQuery) && Utils.isCollectionChild(request.getResourceType())) { if 
(request.getPartitionKeyRangeIdentity() == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Shutting down ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); LifeCycleUtils.closeQuietly(this.storeClientFactory); try { this.reactorHttpClient.shutdown(); } catch (Exception e) { logger.warn("Failure in shutting down reactorHttpClient", e); } } }
Parameterized type -> `CompletableFuture<T>`
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> {
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener<T> latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture<T> futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
class LatencyListener<T> extends ResultHandler<T, Throwable> { private final ResultHandler<T, Throwable> baseFunction; Timer.Context context; LatencyListener(ResultHandler<T, Throwable> baseFunction, Timer latency) { this.baseFunction = baseFunction; } protected void init() { super.init(); context = latency.time(); } @Override public T apply(T o, Throwable throwable) { context.stop(); return baseFunction.apply(o, throwable); } }
class LatencyListener<T> extends ResultHandler<T, Throwable> { private final ResultHandler<T, Throwable> baseFunction; private final Timer latencyTimer; Timer.Context context; LatencyListener(ResultHandler<T, Throwable> baseFunction, Timer latencyTimer) { this.baseFunction = baseFunction; this.latencyTimer = latencyTimer; } protected void init() { super.init(); context = latencyTimer.time(); } @Override public T apply(T o, Throwable throwable) { context.stop(); return baseFunction.apply(o, throwable); } }
addressed
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> {
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener<T> latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture<T> futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
class LatencyListener<T> extends ResultHandler<T, Throwable> { private final ResultHandler<T, Throwable> baseFunction; Timer.Context context; LatencyListener(ResultHandler<T, Throwable> baseFunction, Timer latency) { this.baseFunction = baseFunction; } protected void init() { super.init(); context = latency.time(); } @Override public T apply(T o, Throwable throwable) { context.stop(); return baseFunction.apply(o, throwable); } }
class LatencyListener<T> extends ResultHandler<T, Throwable> { private final ResultHandler<T, Throwable> baseFunction; private final Timer latencyTimer; Timer.Context context; LatencyListener(ResultHandler<T, Throwable> baseFunction, Timer latencyTimer) { this.baseFunction = baseFunction; this.latencyTimer = latencyTimer; } protected void init() { super.init(); context = latencyTimer.time(); } @Override public T apply(T o, Throwable throwable) { context.stop(); return baseFunction.apply(o, throwable); } }
Should we also remove the Model version log? Let us keep the code snippet focused on the more valuable information.
public void recognizeBatchEntities() { List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); textAnalyticsAsyncClient.recognizeBatchEntities(textDocumentInputs).subscribe(recognizeEntitiesResults -> { System.out.printf("Model version: %s%n", recognizeEntitiesResults.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResults.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeEntitiesResult recognizeEntitiesResult : recognizeEntitiesResults) { for (NamedEntity entity : recognizeEntitiesResult.getNamedEntities()) { System.out.printf( "Recognized Named Entity: %s, Type: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getScore()); } } }); }
System.out.printf("Model version: %s%n", recognizeEntitiesResults.getModelVersion());
public void recognizeBatchEntities() { List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); textAnalyticsAsyncClient.recognizeBatchEntities(textDocumentInputs).subscribe(recognizeEntitiesResults -> { TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResults.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeEntitiesResult recognizeEntitiesResult : recognizeEntitiesResults) { for (NamedEntity entity : recognizeEntitiesResult.getNamedEntities()) { System.out.printf( "Recognized Named Entity: %s, Type: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getScore()); } } }); }
class TextAnalyticsAsyncClientJavaDocCodeSnippets { private static final String SUBSCRIPTION_KEY = null; private static final String ENDPOINT = null; TextAnalyticsAsyncClient textAnalyticsAsyncClient = createTextAnalyticsAsyncClient(); /** * Code snippet for creating a {@link TextAnalyticsAsyncClient} * * @return The TextAnalyticsAsyncClient object */ public TextAnalyticsAsyncClient createTextAnalyticsAsyncClient() { TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .subscriptionKey(SUBSCRIPTION_KEY) .endpoint(ENDPOINT) .buildAsyncClient(); return textAnalyticsAsyncClient; } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguages() { String inputText = "Bonjour tout le monde"; textAnalyticsAsyncClient.detectLanguage(inputText).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguageWithResponse() { String input = "This text is in English"; String countryHint = "US"; textAnalyticsAsyncClient.detectLanguageWithResponse(input, countryHint).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getValue().getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesStringList() { final List<String> textInputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguages(textInputs).subscribe(detectedBatchResult -> { 
System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesWithResponse() { final List<String> textInputs1 = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguagesWithResponse(textInputs1, "US").subscribe(response -> { DocumentResultCollection<DetectLanguageResult> detectedBatchResult = response.getValue(); System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient
class TextAnalyticsAsyncClientJavaDocCodeSnippets { private static final String SUBSCRIPTION_KEY = null; private static final String ENDPOINT = null; TextAnalyticsAsyncClient textAnalyticsAsyncClient = createTextAnalyticsAsyncClient(); /** * Code snippet for creating a {@link TextAnalyticsAsyncClient} * * @return The TextAnalyticsAsyncClient object */ public TextAnalyticsAsyncClient createTextAnalyticsAsyncClient() { TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .subscriptionKey(SUBSCRIPTION_KEY) .endpoint(ENDPOINT) .buildAsyncClient(); return textAnalyticsAsyncClient; } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguages() { String inputText = "Bonjour tout le monde"; textAnalyticsAsyncClient.detectLanguage(inputText).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguageWithResponse() { String input = "This text is in English"; String countryHint = "US"; textAnalyticsAsyncClient.detectLanguageWithResponse(input, countryHint).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getValue().getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesStringList() { final List<String> textInputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguages(textInputs).subscribe(detectedBatchResult -> { final 
TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesWithResponse() { final List<String> textInputs1 = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguagesWithResponse(textInputs1, "US").subscribe(response -> { DocumentResultCollection<DetectLanguageResult> detectedBatchResult = response.getValue(); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient
removed
public void recognizeBatchEntities() { List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); textAnalyticsAsyncClient.recognizeBatchEntities(textDocumentInputs).subscribe(recognizeEntitiesResults -> { System.out.printf("Model version: %s%n", recognizeEntitiesResults.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResults.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeEntitiesResult recognizeEntitiesResult : recognizeEntitiesResults) { for (NamedEntity entity : recognizeEntitiesResult.getNamedEntities()) { System.out.printf( "Recognized Named Entity: %s, Type: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getScore()); } } }); }
System.out.printf("Model version: %s%n", recognizeEntitiesResults.getModelVersion());
public void recognizeBatchEntities() { List<TextDocumentInput> textDocumentInputs = Arrays.asList( new TextDocumentInput("0", "I had a wonderful trip to Seattle last week."), new TextDocumentInput("1", "I work at Microsoft.")); textAnalyticsAsyncClient.recognizeBatchEntities(textDocumentInputs).subscribe(recognizeEntitiesResults -> { TextDocumentBatchStatistics batchStatistics = recognizeEntitiesResults.getStatistics(); System.out.printf("Batch statistics, transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (RecognizeEntitiesResult recognizeEntitiesResult : recognizeEntitiesResults) { for (NamedEntity entity : recognizeEntitiesResult.getNamedEntities()) { System.out.printf( "Recognized Named Entity: %s, Type: %s, Score: %s.%n", entity.getText(), entity.getType(), entity.getScore()); } } }); }
class TextAnalyticsAsyncClientJavaDocCodeSnippets { private static final String SUBSCRIPTION_KEY = null; private static final String ENDPOINT = null; TextAnalyticsAsyncClient textAnalyticsAsyncClient = createTextAnalyticsAsyncClient(); /** * Code snippet for creating a {@link TextAnalyticsAsyncClient} * * @return The TextAnalyticsAsyncClient object */ public TextAnalyticsAsyncClient createTextAnalyticsAsyncClient() { TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .subscriptionKey(SUBSCRIPTION_KEY) .endpoint(ENDPOINT) .buildAsyncClient(); return textAnalyticsAsyncClient; } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguages() { String inputText = "Bonjour tout le monde"; textAnalyticsAsyncClient.detectLanguage(inputText).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguageWithResponse() { String input = "This text is in English"; String countryHint = "US"; textAnalyticsAsyncClient.detectLanguageWithResponse(input, countryHint).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getValue().getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesStringList() { final List<String> textInputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguages(textInputs).subscribe(detectedBatchResult -> { 
System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesWithResponse() { final List<String> textInputs1 = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguagesWithResponse(textInputs1, "US").subscribe(response -> { DocumentResultCollection<DetectLanguageResult> detectedBatchResult = response.getValue(); System.out.printf("Model version: %s%n", detectedBatchResult.getModelVersion()); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient
class TextAnalyticsAsyncClientJavaDocCodeSnippets { private static final String SUBSCRIPTION_KEY = null; private static final String ENDPOINT = null; TextAnalyticsAsyncClient textAnalyticsAsyncClient = createTextAnalyticsAsyncClient(); /** * Code snippet for creating a {@link TextAnalyticsAsyncClient} * * @return The TextAnalyticsAsyncClient object */ public TextAnalyticsAsyncClient createTextAnalyticsAsyncClient() { TextAnalyticsAsyncClient textAnalyticsAsyncClient = new TextAnalyticsClientBuilder() .subscriptionKey(SUBSCRIPTION_KEY) .endpoint(ENDPOINT) .buildAsyncClient(); return textAnalyticsAsyncClient; } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguages() { String inputText = "Bonjour tout le monde"; textAnalyticsAsyncClient.detectLanguage(inputText).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguageWithResponse() { String input = "This text is in English"; String countryHint = "US"; textAnalyticsAsyncClient.detectLanguageWithResponse(input, countryHint).subscribe(detectLanguageResult -> { for (DetectedLanguage detectedLanguage : detectLanguageResult.getValue().getDetectedLanguages()) { System.out.printf("Detected languages name: %s, ISO 6391 Name: %s, Score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesStringList() { final List<String> textInputs = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguages(textInputs).subscribe(detectedBatchResult -> { final 
TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient */ public void detectLanguagesWithResponse() { final List<String> textInputs1 = Arrays.asList( "This is written in English", "Este es un document escrito en Español."); textAnalyticsAsyncClient.detectLanguagesWithResponse(textInputs1, "US").subscribe(response -> { DocumentResultCollection<DetectLanguageResult> detectedBatchResult = response.getValue(); final TextDocumentBatchStatistics batchStatistics = detectedBatchResult.getStatistics(); System.out.printf("Batch statistics,transaction count: %s, valid document count: %s.%n", batchStatistics.getTransactionCount(), batchStatistics.getValidDocumentCount()); for (DetectLanguageResult detectLanguageResult : detectedBatchResult) { for (DetectedLanguage detectedLanguage : detectLanguageResult.getDetectedLanguages()) { System.out.printf("Detected language: %s, ISO 6391 name: %s, score: %s.%n", detectedLanguage.getName(), detectedLanguage.getIso6391Name(), detectedLanguage.getScore()); } } }); } /** * Code snippet for {@link TextAnalyticsAsyncClient
Reactor might / will wrap the exception in ReactiveException. The safest way to check `throwable instanceOf CosmosClientException` is this. ``` final Throwable unwrappedThrowable = reactor.core.Exceptions.unwrap(throwable); if (unwrappedThrowable instanceof CosmosClientException) { ..... } ```
public Mono<StoreResponse> invokeStoreAsync(final Uri addressUri, final RxDocumentServiceRequest request) { logger.debug("RntbdTransportClient.invokeStoreAsync({}, {})", addressUri, request); checkNotNull(addressUri, "expected non-null address"); checkNotNull(request, "expected non-null request"); this.throwIfClosed(); URI address = addressUri.getURI(); final RntbdRequestArgs requestArgs = new RntbdRequestArgs(request, address); requestArgs.traceOperation(logger, null, "invokeStoreAsync"); final RntbdEndpoint endpoint = this.endpointProvider.get(address); final RntbdRequestRecord record = endpoint.request(requestArgs); logger.debug("RntbdTransportClient.invokeStoreAsync({}, {}): {}", address, request, record); return Mono.fromFuture(record.whenComplete((response, throwable) -> { record.stage(RntbdRequestRecord.Stage.COMPLETED); if (throwable == null) { response.setRequestTimeline(record.takeTimelineSnapshot()); } else if (throwable instanceof CosmosClientException) { CosmosClientException error = (CosmosClientException) throwable; BridgeInternal.setRequestTimeline(error, record.takeTimelineSnapshot()); } })).doOnCancel(() -> { record.cancel(true); }); }
} else if (throwable instanceof CosmosClientException) {
public Mono<StoreResponse> invokeStoreAsync(final Uri addressUri, final RxDocumentServiceRequest request) { logger.debug("RntbdTransportClient.invokeStoreAsync({}, {})", addressUri, request); checkNotNull(addressUri, "expected non-null address"); checkNotNull(request, "expected non-null request"); this.throwIfClosed(); URI address = addressUri.getURI(); final RntbdRequestArgs requestArgs = new RntbdRequestArgs(request, address); requestArgs.traceOperation(logger, null, "invokeStoreAsync"); final RntbdEndpoint endpoint = this.endpointProvider.get(address); final RntbdRequestRecord record = endpoint.request(requestArgs); logger.debug("RntbdTransportClient.invokeStoreAsync({}, {}): {}", address, request, record); return Mono.fromFuture(record.whenComplete((response, throwable) -> { record.stage(RntbdRequestRecord.Stage.COMPLETED); if (throwable == null) { response.setRequestTimeline(record.takeTimelineSnapshot()); } else if (throwable instanceof CosmosClientException) { CosmosClientException error = (CosmosClientException) throwable; BridgeInternal.setRequestTimeline(error, record.takeTimelineSnapshot()); } })).doOnCancel(() -> { record.cancel(true); }); }
class RntbdTransportClient extends TransportClient { private static final String TAG_NAME = RntbdTransportClient.class.getSimpleName(); private static final AtomicLong instanceCount = new AtomicLong(); private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClient.class); private final AtomicBoolean closed = new AtomicBoolean(); private final RntbdEndpoint.Provider endpointProvider; private final long id; private final Tag tag; RntbdTransportClient(final RntbdEndpoint.Provider endpointProvider) { this.endpointProvider = endpointProvider; this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Options options, final SslContext sslContext) { this.endpointProvider = new RntbdServiceEndpoint.Provider(this, options, sslContext); this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Configs configs, final int requestTimeoutInSeconds, final UserAgentContainer userAgent) { this(new Options.Builder(requestTimeoutInSeconds).userAgent(userAgent).build(), configs.getSslContext()); } public boolean isClosed() { return this.closed.get(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { logger.debug("close {}", this); this.endpointProvider.close(); return; } logger.debug("already closed {}", this); } public int endpointCount() { return this.endpointProvider.count(); } public int endpointEvictionCount() { return this.endpointProvider.evictions(); } public long id() { return this.id; } @Override public Tag tag() { return this.tag; } @Override public String toString() { return RntbdObjectMapper.toString(this); } private static Tag tag(long id) { return Tag.of(TAG_NAME, Strings.padStart(Long.toHexString(id).toUpperCase(), 4, '0')); } private void throwIfClosed() { checkState(!this.closed.get(), "%s is closed", this); } public static final class Options { @JsonProperty() private final int bufferPageSize; 
@JsonProperty() private final Duration connectionTimeout; @JsonProperty() private final Duration idleChannelTimeout; @JsonProperty() private final Duration idleEndpointTimeout; @JsonProperty() private final int maxBufferCapacity; @JsonProperty() private final int maxChannelsPerEndpoint; @JsonProperty() private final int maxRequestsPerChannel; @JsonProperty() private final Duration receiveHangDetectionTime; @JsonProperty() private final Duration requestExpiryInterval; @JsonProperty() private final Duration requestTimeout; @JsonProperty() private final Duration requestTimerResolution; @JsonProperty() private final Duration sendHangDetectionTime; @JsonProperty() private final Duration shutdownTimeout; @JsonIgnore() private final UserAgentContainer userAgent; private Options() { this.bufferPageSize = 8192; this.connectionTimeout = null; this.idleChannelTimeout = Duration.ZERO; this.idleEndpointTimeout = Duration.ofSeconds(70L); this.maxBufferCapacity = 8192 << 10; this.maxChannelsPerEndpoint = 10; this.maxRequestsPerChannel = 30; this.receiveHangDetectionTime = Duration.ofSeconds(65L); this.requestExpiryInterval = Duration.ofSeconds(5L); this.requestTimeout = null; this.requestTimerResolution = Duration.ofMillis(5L); this.sendHangDetectionTime = Duration.ofSeconds(10L); this.shutdownTimeout = Duration.ofSeconds(15L); this.userAgent = new UserAgentContainer(); } private Options(Builder builder) { this.bufferPageSize = builder.bufferPageSize; this.idleChannelTimeout = builder.idleChannelTimeout; this.idleEndpointTimeout = builder.idleEndpointTimeout; this.maxBufferCapacity = builder.maxBufferCapacity; this.maxChannelsPerEndpoint = builder.maxChannelsPerEndpoint; this.maxRequestsPerChannel = builder.maxRequestsPerChannel; this.receiveHangDetectionTime = builder.receiveHangDetectionTime; this.requestExpiryInterval = builder.requestExpiryInterval; this.requestTimeout = builder.requestTimeout; this.requestTimerResolution = builder.requestTimerResolution; 
this.sendHangDetectionTime = builder.sendHangDetectionTime; this.shutdownTimeout = builder.shutdownTimeout; this.userAgent = builder.userAgent; this.connectionTimeout = builder.connectionTimeout == null ? builder.requestTimeout : builder.connectionTimeout; } public int bufferPageSize() { return this.bufferPageSize; } public Duration connectionTimeout() { return this.connectionTimeout; } public Duration idleChannelTimeout() { return this.idleChannelTimeout; } public Duration idleEndpointTimeout() { return this.idleEndpointTimeout; } public int maxBufferCapacity() { return this.maxBufferCapacity; } public int maxChannelsPerEndpoint() { return this.maxChannelsPerEndpoint; } public int maxRequestsPerChannel() { return this.maxRequestsPerChannel; } public Duration receiveHangDetectionTime() { return this.receiveHangDetectionTime; } public Duration requestExpiryInterval() { return this.requestExpiryInterval; } public Duration requestTimeout() { return this.requestTimeout; } public Duration requestTimerResolution() { return this.requestTimerResolution; } public Duration sendHangDetectionTime() { return this.sendHangDetectionTime; } public Duration shutdownTimeout() { return this.shutdownTimeout; } public UserAgentContainer userAgent() { return this.userAgent; } @Override public String toString() { return RntbdObjectMapper.toJson(this); } /** * A builder for constructing {@link Options} instances. * * <h3>Using system properties to set the default {@link Options} used by an {@link Builder}</h3> * <p> * A default options instance is created when the {@link Builder} class is initialized. This instance specifies * the default options used by every {@link Builder} instance. In priority order the default options instance * is created from: * <ol> * <li>The JSON value of system property {@code azure.cosmos.directTcp.defaultOptions}. 
* <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptions={\"maxChannelsPerEndpoint\":5,\"maxRequestsPerChannel\":30}}</pre> * </li> * <li>The contents of the JSON file located by system property {@code azure.cosmos.directTcp * .defaultOptionsFile}. * <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptionsFile=/path/to/default/options/file}</pre> * </li> * <li>The contents of JSON resource file {@code azure.cosmos.directTcp.defaultOptions.json}. * <p>Specifically, the resource file is read from this stream: * <pre>{@code RntbdTransportClient.class.getClassLoader().getResourceAsStream("azure.cosmos.directTcp.defaultOptions.json")}</pre> * <p>Example: <pre>{@code { * "bufferPageSize": 8192, * "connectionTimeout": "PT1M", * "idleChannelTimeout": "PT0S", * "idleEndpointTimeout": "PT1M10S", * "maxBufferCapacity": 8388608, * "maxChannelsPerEndpoint": 10, * "maxRequestsPerChannel": 30, * "receiveHangDetectionTime": "PT1M5S", * "requestExpiryInterval": "PT5S", * "requestTimeout": "PT1M", * "requestTimerResolution": "PT0.5S", * "sendHangDetectionTime": "PT10S", * "shutdownTimeout": "PT15S" * }}</pre> * </li> * </ol> * <p>JSON value errors are logged and then ignored. If none of the above values are available or all available * values are in error, the default options instance is created from the private parameterless constructor for * {@link Options}. 
*/ @SuppressWarnings("UnusedReturnValue") public static class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { DEFAULT_OPTIONS = new Options(); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionTimeout; private Duration idleChannelTimeout; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private Duration receiveHangDetectionTime; private Duration requestExpiryInterval; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private 
UserAgentContainer userAgent; public Builder(Duration requestTimeout) { this.requestTimeout(requestTimeout); this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize; this.connectionTimeout = DEFAULT_OPTIONS.connectionTimeout; this.idleChannelTimeout = DEFAULT_OPTIONS.idleChannelTimeout; this.idleEndpointTimeout = DEFAULT_OPTIONS.idleEndpointTimeout; this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity; this.maxChannelsPerEndpoint = DEFAULT_OPTIONS.maxChannelsPerEndpoint; this.maxRequestsPerChannel = DEFAULT_OPTIONS.maxRequestsPerChannel; this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime; this.requestExpiryInterval = DEFAULT_OPTIONS.requestExpiryInterval; this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution; this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime; this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout; this.userAgent = DEFAULT_OPTIONS.userAgent; } public Builder(int requestTimeoutInSeconds) { this(Duration.ofSeconds(requestTimeoutInSeconds)); } public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectionTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); 
this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & (value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestExpiryInterval(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestExpiryInterval = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } } } static final class JsonSerializer extends StdSerializer<RntbdTransportClient> { private static final long serialVersionUID = 1007663695768825670L; JsonSerializer() { super(RntbdTransportClient.class); } @Override public void serialize( final RntbdTransportClient value, final JsonGenerator generator, final SerializerProvider provider ) throws IOException { generator.writeStartObject(); generator.writeNumberField("id", value.id()); generator.writeBooleanField("isClosed", value.isClosed()); generator.writeObjectField("configuration", value.endpointProvider.config()); generator.writeObjectFieldStart("serviceEndpoints"); generator.writeNumberField("count", value.endpointCount()); generator.writeArrayFieldStart("items"); for (final Iterator<RntbdEndpoint> iterator = value.endpointProvider.list().iterator(); iterator.hasNext(); ) { generator.writeObject(iterator.next()); } generator.writeEndArray(); generator.writeEndObject(); generator.writeEndObject(); } } }
class RntbdTransportClient extends TransportClient { private static final String TAG_NAME = RntbdTransportClient.class.getSimpleName(); private static final AtomicLong instanceCount = new AtomicLong(); private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClient.class); private final AtomicBoolean closed = new AtomicBoolean(); private final RntbdEndpoint.Provider endpointProvider; private final long id; private final Tag tag; RntbdTransportClient(final RntbdEndpoint.Provider endpointProvider) { this.endpointProvider = endpointProvider; this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Options options, final SslContext sslContext) { this.endpointProvider = new RntbdServiceEndpoint.Provider(this, options, sslContext); this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Configs configs, final int requestTimeoutInSeconds, final UserAgentContainer userAgent) { this(new Options.Builder(requestTimeoutInSeconds).userAgent(userAgent).build(), configs.getSslContext()); } public boolean isClosed() { return this.closed.get(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { logger.debug("close {}", this); this.endpointProvider.close(); return; } logger.debug("already closed {}", this); } public int endpointCount() { return this.endpointProvider.count(); } public int endpointEvictionCount() { return this.endpointProvider.evictions(); } public long id() { return this.id; } @Override public Tag tag() { return this.tag; } @Override public String toString() { return RntbdObjectMapper.toString(this); } private static Tag tag(long id) { return Tag.of(TAG_NAME, Strings.padStart(Long.toHexString(id).toUpperCase(), 4, '0')); } private void throwIfClosed() { checkState(!this.closed.get(), "%s is closed", this); } public static final class Options { @JsonProperty() private final int bufferPageSize; 
@JsonProperty() private final Duration connectionTimeout; @JsonProperty() private final Duration idleChannelTimeout; @JsonProperty() private final Duration idleEndpointTimeout; @JsonProperty() private final int maxBufferCapacity; @JsonProperty() private final int maxChannelsPerEndpoint; @JsonProperty() private final int maxRequestsPerChannel; @JsonProperty() private final Duration receiveHangDetectionTime; @JsonProperty() private final Duration requestExpiryInterval; @JsonProperty() private final Duration requestTimeout; @JsonProperty() private final Duration requestTimerResolution; @JsonProperty() private final Duration sendHangDetectionTime; @JsonProperty() private final Duration shutdownTimeout; @JsonIgnore() private final UserAgentContainer userAgent; private Options() { this.bufferPageSize = 8192; this.connectionTimeout = null; this.idleChannelTimeout = Duration.ZERO; this.idleEndpointTimeout = Duration.ofSeconds(70L); this.maxBufferCapacity = 8192 << 10; this.maxChannelsPerEndpoint = 10; this.maxRequestsPerChannel = 30; this.receiveHangDetectionTime = Duration.ofSeconds(65L); this.requestExpiryInterval = Duration.ofSeconds(5L); this.requestTimeout = null; this.requestTimerResolution = Duration.ofMillis(5L); this.sendHangDetectionTime = Duration.ofSeconds(10L); this.shutdownTimeout = Duration.ofSeconds(15L); this.userAgent = new UserAgentContainer(); } private Options(Builder builder) { this.bufferPageSize = builder.bufferPageSize; this.idleChannelTimeout = builder.idleChannelTimeout; this.idleEndpointTimeout = builder.idleEndpointTimeout; this.maxBufferCapacity = builder.maxBufferCapacity; this.maxChannelsPerEndpoint = builder.maxChannelsPerEndpoint; this.maxRequestsPerChannel = builder.maxRequestsPerChannel; this.receiveHangDetectionTime = builder.receiveHangDetectionTime; this.requestExpiryInterval = builder.requestExpiryInterval; this.requestTimeout = builder.requestTimeout; this.requestTimerResolution = builder.requestTimerResolution; 
this.sendHangDetectionTime = builder.sendHangDetectionTime; this.shutdownTimeout = builder.shutdownTimeout; this.userAgent = builder.userAgent; this.connectionTimeout = builder.connectionTimeout == null ? builder.requestTimeout : builder.connectionTimeout; } public int bufferPageSize() { return this.bufferPageSize; } public Duration connectionTimeout() { return this.connectionTimeout; } public Duration idleChannelTimeout() { return this.idleChannelTimeout; } public Duration idleEndpointTimeout() { return this.idleEndpointTimeout; } public int maxBufferCapacity() { return this.maxBufferCapacity; } public int maxChannelsPerEndpoint() { return this.maxChannelsPerEndpoint; } public int maxRequestsPerChannel() { return this.maxRequestsPerChannel; } public Duration receiveHangDetectionTime() { return this.receiveHangDetectionTime; } public Duration requestExpiryInterval() { return this.requestExpiryInterval; } public Duration requestTimeout() { return this.requestTimeout; } public Duration requestTimerResolution() { return this.requestTimerResolution; } public Duration sendHangDetectionTime() { return this.sendHangDetectionTime; } public Duration shutdownTimeout() { return this.shutdownTimeout; } public UserAgentContainer userAgent() { return this.userAgent; } @Override public String toString() { return RntbdObjectMapper.toJson(this); } /** * A builder for constructing {@link Options} instances. * * <h3>Using system properties to set the default {@link Options} used by an {@link Builder}</h3> * <p> * A default options instance is created when the {@link Builder} class is initialized. This instance specifies * the default options used by every {@link Builder} instance. In priority order the default options instance * is created from: * <ol> * <li>The JSON value of system property {@code azure.cosmos.directTcp.defaultOptions}. 
* <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptions={\"maxChannelsPerEndpoint\":5,\"maxRequestsPerChannel\":30}}</pre> * </li> * <li>The contents of the JSON file located by system property {@code azure.cosmos.directTcp * .defaultOptionsFile}. * <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptionsFile=/path/to/default/options/file}</pre> * </li> * <li>The contents of JSON resource file {@code azure.cosmos.directTcp.defaultOptions.json}. * <p>Specifically, the resource file is read from this stream: * <pre>{@code RntbdTransportClient.class.getClassLoader().getResourceAsStream("azure.cosmos.directTcp.defaultOptions.json")}</pre> * <p>Example: <pre>{@code { * "bufferPageSize": 8192, * "connectionTimeout": "PT1M", * "idleChannelTimeout": "PT0S", * "idleEndpointTimeout": "PT1M10S", * "maxBufferCapacity": 8388608, * "maxChannelsPerEndpoint": 10, * "maxRequestsPerChannel": 30, * "receiveHangDetectionTime": "PT1M5S", * "requestExpiryInterval": "PT5S", * "requestTimeout": "PT1M", * "requestTimerResolution": "PT0.5S", * "sendHangDetectionTime": "PT10S", * "shutdownTimeout": "PT15S" * }}</pre> * </li> * </ol> * <p>JSON value errors are logged and then ignored. If none of the above values are available or all available * values are in error, the default options instance is created from the private parameterless constructor for * {@link Options}. 
*/ @SuppressWarnings("UnusedReturnValue") public static class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { DEFAULT_OPTIONS = new Options(); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionTimeout; private Duration idleChannelTimeout; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private Duration receiveHangDetectionTime; private Duration requestExpiryInterval; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private 
UserAgentContainer userAgent; public Builder(Duration requestTimeout) { this.requestTimeout(requestTimeout); this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize; this.connectionTimeout = DEFAULT_OPTIONS.connectionTimeout; this.idleChannelTimeout = DEFAULT_OPTIONS.idleChannelTimeout; this.idleEndpointTimeout = DEFAULT_OPTIONS.idleEndpointTimeout; this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity; this.maxChannelsPerEndpoint = DEFAULT_OPTIONS.maxChannelsPerEndpoint; this.maxRequestsPerChannel = DEFAULT_OPTIONS.maxRequestsPerChannel; this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime; this.requestExpiryInterval = DEFAULT_OPTIONS.requestExpiryInterval; this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution; this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime; this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout; this.userAgent = DEFAULT_OPTIONS.userAgent; } public Builder(int requestTimeoutInSeconds) { this(Duration.ofSeconds(requestTimeoutInSeconds)); } public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectionTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); 
this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & (value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestExpiryInterval(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestExpiryInterval = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } } } static final class JsonSerializer extends StdSerializer<RntbdTransportClient> { private static final long serialVersionUID = 1007663695768825670L; JsonSerializer() { super(RntbdTransportClient.class); } @Override public void serialize( final RntbdTransportClient value, final JsonGenerator generator, final SerializerProvider provider ) throws IOException { generator.writeStartObject(); generator.writeNumberField("id", value.id()); generator.writeBooleanField("isClosed", value.isClosed()); generator.writeObjectField("configuration", value.endpointProvider.config()); generator.writeObjectFieldStart("serviceEndpoints"); generator.writeNumberField("count", value.endpointCount()); generator.writeArrayFieldStart("items"); for (final Iterator<RntbdEndpoint> iterator = value.endpointProvider.list().iterator(); iterator.hasNext(); ) { generator.writeObject(iterator.next()); } generator.writeEndArray(); generator.writeEndObject(); generator.writeEndObject(); } } }
@kushagraThapar `reactor.core.Exceptions.unwrap` should not be needed in this layer. right?
public Mono<StoreResponse> invokeStoreAsync(final Uri addressUri, final RxDocumentServiceRequest request) { logger.debug("RntbdTransportClient.invokeStoreAsync({}, {})", addressUri, request); checkNotNull(addressUri, "expected non-null address"); checkNotNull(request, "expected non-null request"); this.throwIfClosed(); URI address = addressUri.getURI(); final RntbdRequestArgs requestArgs = new RntbdRequestArgs(request, address); requestArgs.traceOperation(logger, null, "invokeStoreAsync"); final RntbdEndpoint endpoint = this.endpointProvider.get(address); final RntbdRequestRecord record = endpoint.request(requestArgs); logger.debug("RntbdTransportClient.invokeStoreAsync({}, {}): {}", address, request, record); return Mono.fromFuture(record.whenComplete((response, throwable) -> { record.stage(RntbdRequestRecord.Stage.COMPLETED); if (throwable == null) { response.setRequestTimeline(record.takeTimelineSnapshot()); } else if (throwable instanceof CosmosClientException) { CosmosClientException error = (CosmosClientException) throwable; BridgeInternal.setRequestTimeline(error, record.takeTimelineSnapshot()); } })).doOnCancel(() -> { record.cancel(true); }); }
} else if (throwable instanceof CosmosClientException) {
public Mono<StoreResponse> invokeStoreAsync(final Uri addressUri, final RxDocumentServiceRequest request) { logger.debug("RntbdTransportClient.invokeStoreAsync({}, {})", addressUri, request); checkNotNull(addressUri, "expected non-null address"); checkNotNull(request, "expected non-null request"); this.throwIfClosed(); URI address = addressUri.getURI(); final RntbdRequestArgs requestArgs = new RntbdRequestArgs(request, address); requestArgs.traceOperation(logger, null, "invokeStoreAsync"); final RntbdEndpoint endpoint = this.endpointProvider.get(address); final RntbdRequestRecord record = endpoint.request(requestArgs); logger.debug("RntbdTransportClient.invokeStoreAsync({}, {}): {}", address, request, record); return Mono.fromFuture(record.whenComplete((response, throwable) -> { record.stage(RntbdRequestRecord.Stage.COMPLETED); if (throwable == null) { response.setRequestTimeline(record.takeTimelineSnapshot()); } else if (throwable instanceof CosmosClientException) { CosmosClientException error = (CosmosClientException) throwable; BridgeInternal.setRequestTimeline(error, record.takeTimelineSnapshot()); } })).doOnCancel(() -> { record.cancel(true); }); }
class RntbdTransportClient extends TransportClient { private static final String TAG_NAME = RntbdTransportClient.class.getSimpleName(); private static final AtomicLong instanceCount = new AtomicLong(); private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClient.class); private final AtomicBoolean closed = new AtomicBoolean(); private final RntbdEndpoint.Provider endpointProvider; private final long id; private final Tag tag; RntbdTransportClient(final RntbdEndpoint.Provider endpointProvider) { this.endpointProvider = endpointProvider; this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Options options, final SslContext sslContext) { this.endpointProvider = new RntbdServiceEndpoint.Provider(this, options, sslContext); this.id = instanceCount.incrementAndGet(); this.tag = RntbdTransportClient.tag(this.id); } RntbdTransportClient(final Configs configs, final int requestTimeoutInSeconds, final UserAgentContainer userAgent) { this(new Options.Builder(requestTimeoutInSeconds).userAgent(userAgent).build(), configs.getSslContext()); } public boolean isClosed() { return this.closed.get(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { logger.debug("close {}", this); this.endpointProvider.close(); return; } logger.debug("already closed {}", this); } public int endpointCount() { return this.endpointProvider.count(); } public int endpointEvictionCount() { return this.endpointProvider.evictions(); } public long id() { return this.id; } @Override public Tag tag() { return this.tag; } @Override public String toString() { return RntbdObjectMapper.toString(this); } private static Tag tag(long id) { return Tag.of(TAG_NAME, Strings.padStart(Long.toHexString(id).toUpperCase(), 4, '0')); } private void throwIfClosed() { checkState(!this.closed.get(), "%s is closed", this); } public static final class Options { @JsonProperty() private final int bufferPageSize; 
@JsonProperty() private final Duration connectionTimeout; @JsonProperty() private final Duration idleChannelTimeout; @JsonProperty() private final Duration idleEndpointTimeout; @JsonProperty() private final int maxBufferCapacity; @JsonProperty() private final int maxChannelsPerEndpoint; @JsonProperty() private final int maxRequestsPerChannel; @JsonProperty() private final Duration receiveHangDetectionTime; @JsonProperty() private final Duration requestExpiryInterval; @JsonProperty() private final Duration requestTimeout; @JsonProperty() private final Duration requestTimerResolution; @JsonProperty() private final Duration sendHangDetectionTime; @JsonProperty() private final Duration shutdownTimeout; @JsonIgnore() private final UserAgentContainer userAgent; private Options() { this.bufferPageSize = 8192; this.connectionTimeout = null; this.idleChannelTimeout = Duration.ZERO; this.idleEndpointTimeout = Duration.ofSeconds(70L); this.maxBufferCapacity = 8192 << 10; this.maxChannelsPerEndpoint = 10; this.maxRequestsPerChannel = 30; this.receiveHangDetectionTime = Duration.ofSeconds(65L); this.requestExpiryInterval = Duration.ofSeconds(5L); this.requestTimeout = null; this.requestTimerResolution = Duration.ofMillis(5L); this.sendHangDetectionTime = Duration.ofSeconds(10L); this.shutdownTimeout = Duration.ofSeconds(15L); this.userAgent = new UserAgentContainer(); } private Options(Builder builder) { this.bufferPageSize = builder.bufferPageSize; this.idleChannelTimeout = builder.idleChannelTimeout; this.idleEndpointTimeout = builder.idleEndpointTimeout; this.maxBufferCapacity = builder.maxBufferCapacity; this.maxChannelsPerEndpoint = builder.maxChannelsPerEndpoint; this.maxRequestsPerChannel = builder.maxRequestsPerChannel; this.receiveHangDetectionTime = builder.receiveHangDetectionTime; this.requestExpiryInterval = builder.requestExpiryInterval; this.requestTimeout = builder.requestTimeout; this.requestTimerResolution = builder.requestTimerResolution; 
this.sendHangDetectionTime = builder.sendHangDetectionTime; this.shutdownTimeout = builder.shutdownTimeout; this.userAgent = builder.userAgent; this.connectionTimeout = builder.connectionTimeout == null ? builder.requestTimeout : builder.connectionTimeout; } public int bufferPageSize() { return this.bufferPageSize; } public Duration connectionTimeout() { return this.connectionTimeout; } public Duration idleChannelTimeout() { return this.idleChannelTimeout; } public Duration idleEndpointTimeout() { return this.idleEndpointTimeout; } public int maxBufferCapacity() { return this.maxBufferCapacity; } public int maxChannelsPerEndpoint() { return this.maxChannelsPerEndpoint; } public int maxRequestsPerChannel() { return this.maxRequestsPerChannel; } public Duration receiveHangDetectionTime() { return this.receiveHangDetectionTime; } public Duration requestExpiryInterval() { return this.requestExpiryInterval; } public Duration requestTimeout() { return this.requestTimeout; } public Duration requestTimerResolution() { return this.requestTimerResolution; } public Duration sendHangDetectionTime() { return this.sendHangDetectionTime; } public Duration shutdownTimeout() { return this.shutdownTimeout; } public UserAgentContainer userAgent() { return this.userAgent; } @Override public String toString() { return RntbdObjectMapper.toJson(this); } /** * A builder for constructing {@link Options} instances. * * <h3>Using system properties to set the default {@link Options} used by an {@link Builder}</h3> * <p> * A default options instance is created when the {@link Builder} class is initialized. This instance specifies * the default options used by every {@link Builder} instance. In priority order the default options instance * is created from: * <ol> * <li>The JSON value of system property {@code azure.cosmos.directTcp.defaultOptions}. 
* <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptions={\"maxChannelsPerEndpoint\":5,\"maxRequestsPerChannel\":30}}</pre> * </li> * <li>The contents of the JSON file located by system property {@code azure.cosmos.directTcp * .defaultOptionsFile}. * <p>Example: * <pre>{@code -Dazure.cosmos.directTcp.defaultOptionsFile=/path/to/default/options/file}</pre> * </li> * <li>The contents of JSON resource file {@code azure.cosmos.directTcp.defaultOptions.json}. * <p>Specifically, the resource file is read from this stream: * <pre>{@code RntbdTransportClient.class.getClassLoader().getResourceAsStream("azure.cosmos.directTcp.defaultOptions.json")}</pre> * <p>Example: <pre>{@code { * "bufferPageSize": 8192, * "connectionTimeout": "PT1M", * "idleChannelTimeout": "PT0S", * "idleEndpointTimeout": "PT1M10S", * "maxBufferCapacity": 8388608, * "maxChannelsPerEndpoint": 10, * "maxRequestsPerChannel": 30, * "receiveHangDetectionTime": "PT1M5S", * "requestExpiryInterval": "PT5S", * "requestTimeout": "PT1M", * "requestTimerResolution": "PT0.5S", * "sendHangDetectionTime": "PT10S", * "shutdownTimeout": "PT15S" * }}</pre> * </li> * </ol> * <p>JSON value errors are logged and then ignored. If none of the above values are available or all available * values are in error, the default options instance is created from the private parameterless constructor for * {@link Options}. 
*/ @SuppressWarnings("UnusedReturnValue") public static class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { DEFAULT_OPTIONS = new Options(); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionTimeout; private Duration idleChannelTimeout; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private Duration receiveHangDetectionTime; private Duration requestExpiryInterval; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private 
UserAgentContainer userAgent; public Builder(Duration requestTimeout) { this.requestTimeout(requestTimeout); this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize; this.connectionTimeout = DEFAULT_OPTIONS.connectionTimeout; this.idleChannelTimeout = DEFAULT_OPTIONS.idleChannelTimeout; this.idleEndpointTimeout = DEFAULT_OPTIONS.idleEndpointTimeout; this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity; this.maxChannelsPerEndpoint = DEFAULT_OPTIONS.maxChannelsPerEndpoint; this.maxRequestsPerChannel = DEFAULT_OPTIONS.maxRequestsPerChannel; this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime; this.requestExpiryInterval = DEFAULT_OPTIONS.requestExpiryInterval; this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution; this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime; this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout; this.userAgent = DEFAULT_OPTIONS.userAgent; } public Builder(int requestTimeoutInSeconds) { this(Duration.ofSeconds(requestTimeoutInSeconds)); } public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectionTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); 
this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & (value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestExpiryInterval(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestExpiryInterval = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } } } static final class JsonSerializer extends StdSerializer<RntbdTransportClient> { private static final long serialVersionUID = 1007663695768825670L; JsonSerializer() { super(RntbdTransportClient.class); } @Override public void serialize( final RntbdTransportClient value, final JsonGenerator generator, final SerializerProvider provider ) throws IOException { generator.writeStartObject(); generator.writeNumberField("id", value.id()); generator.writeBooleanField("isClosed", value.isClosed()); generator.writeObjectField("configuration", value.endpointProvider.config()); generator.writeObjectFieldStart("serviceEndpoints"); generator.writeNumberField("count", value.endpointCount()); generator.writeArrayFieldStart("items"); for (final Iterator<RntbdEndpoint> iterator = value.endpointProvider.list().iterator(); iterator.hasNext(); ) { generator.writeObject(iterator.next()); } generator.writeEndArray(); generator.writeEndObject(); generator.writeEndObject(); } } }
/**
 * A {@link TransportClient} that communicates with the Cosmos DB backend over the RNTBD
 * (Direct TCP) protocol.
 * <p>
 * Connection management is delegated to an {@link RntbdEndpoint.Provider}, which this client
 * owns and closes. Instances carry a unique numeric {@code id} and a metrics {@link Tag}
 * derived from it. {@link #close} is idempotent: the underlying provider is closed at most
 * once, guarded by {@link #closed}.
 */
class RntbdTransportClient extends TransportClient {

    // Tag name used for the metrics tag produced by tag(long).
    private static final String TAG_NAME = RntbdTransportClient.class.getSimpleName();

    // Monotonically increasing counter; assigns each client instance a unique id.
    private static final AtomicLong instanceCount = new AtomicLong();

    private static final Logger logger = LoggerFactory.getLogger(RntbdTransportClient.class);

    // Flips to true exactly once in close(); prevents double-closing the endpoint provider.
    private final AtomicBoolean closed = new AtomicBoolean();

    // Owns the pooled RNTBD service endpoints (one per backend address) used by this client.
    private final RntbdEndpoint.Provider endpointProvider;

    // Unique instance id, taken from instanceCount.
    private final long id;

    // Metrics tag: TAG_NAME -> zero-padded upper-case hex rendering of id.
    private final Tag tag;

    /**
     * Creates a client backed by an externally supplied endpoint provider.
     *
     * @param endpointProvider provider of RNTBD endpoints; this client assumes ownership and
     *                         closes it in {@link #close}.
     */
    RntbdTransportClient(final RntbdEndpoint.Provider endpointProvider) {
        this.endpointProvider = endpointProvider;
        this.id = instanceCount.incrementAndGet();
        this.tag = RntbdTransportClient.tag(this.id);
    }

    /**
     * Creates a client with a new {@link RntbdServiceEndpoint.Provider} built from the given
     * options and SSL context.
     *
     * @param options    Direct TCP transport options.
     * @param sslContext netty SSL context used for all connections.
     */
    RntbdTransportClient(final Options options, final SslContext sslContext) {
        this.endpointProvider = new RntbdServiceEndpoint.Provider(this, options, sslContext);
        this.id = instanceCount.incrementAndGet();
        this.tag = RntbdTransportClient.tag(this.id);
    }

    /**
     * Convenience constructor: builds {@link Options} from a request timeout (in seconds) and a
     * user agent, taking the SSL context from {@code configs}.
     *
     * @param configs                 source of the SSL context.
     * @param requestTimeoutInSeconds request timeout, in seconds.
     * @param userAgent               user agent recorded in the options.
     */
    RntbdTransportClient(final Configs configs, final int requestTimeoutInSeconds, final UserAgentContainer userAgent) {
        this(new Options.Builder(requestTimeoutInSeconds).userAgent(userAgent).build(), configs.getSslContext());
    }

    /**
     * @return {@code true} if {@link #close} has been called on this client.
     */
    public boolean isClosed() {
        return this.closed.get();
    }

    /**
     * Closes this client and its endpoint provider. Safe to call more than once; only the first
     * call closes the provider, subsequent calls just log.
     */
    @Override
    public void close() {
        if (this.closed.compareAndSet(false, true)) {
            logger.debug("close {}", this);
            this.endpointProvider.close();
            return;
        }
        logger.debug("already closed {}", this);
    }

    /**
     * @return the current number of service endpoints managed by this client's provider.
     */
    public int endpointCount() {
        return this.endpointProvider.count();
    }

    /**
     * @return the number of endpoint evictions performed by this client's provider.
     */
    public int endpointEvictionCount() {
        return this.endpointProvider.evictions();
    }

    /**
     * @return the unique id of this client instance.
     */
    public long id() {
        return this.id;
    }

    /**
     * @return the metrics tag identifying this client instance.
     */
    @Override
    public Tag tag() {
        return this.tag;
    }

    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    // Builds the per-instance metrics tag: id as upper-case hex, left-padded with '0' to at
    // least 4 characters.
    // NOTE(review): toUpperCase() uses the default locale; Locale.ROOT would be safer for hex
    // digits — harmless for [0-9a-f] except under unusual locales. Confirm before changing.
    private static Tag tag(long id) {
        return Tag.of(TAG_NAME, Strings.padStart(Long.toHexString(id).toUpperCase(), 4, '0'));
    }

    // Guard intended for request-dispatch paths; throws IllegalStateException once closed.
    // NOTE(review): not referenced anywhere in this chunk — presumably called by the request
    // invocation method(s) defined elsewhere in this class. Verify before removing.
    private void throwIfClosed() {
        checkState(!this.closed.get(), "%s is closed", this);
    }

    /**
     * Immutable configuration for the Direct TCP transport. Serialized to/from JSON by Jackson;
     * every field except {@link #userAgent} participates in (de)serialization.
     */
    public static final class Options {

        @JsonProperty()
        private final int bufferPageSize;

        @JsonProperty()
        private final Duration connectionTimeout;

        @JsonProperty()
        private final Duration idleChannelTimeout;

        @JsonProperty()
        private final Duration idleEndpointTimeout;

        @JsonProperty()
        private final int maxBufferCapacity;

        @JsonProperty()
        private final int maxChannelsPerEndpoint;

        @JsonProperty()
        private final int maxRequestsPerChannel;

        @JsonProperty()
        private final Duration receiveHangDetectionTime;

        @JsonProperty()
        private final Duration requestExpiryInterval;

        @JsonProperty()
        private final Duration requestTimeout;

        @JsonProperty()
        private final Duration requestTimerResolution;

        @JsonProperty()
        private final Duration sendHangDetectionTime;

        @JsonProperty()
        private final Duration shutdownTimeout;

        // Excluded from JSON; defaults to an empty UserAgentContainer.
        @JsonIgnore()
        private final UserAgentContainer userAgent;

        // Hard-coded fallback defaults, used (via Builder's static initializer) when no default
        // options could be loaded from a system property, file, or classpath resource. Note that
        // connectionTimeout and requestTimeout are deliberately left null here; requestTimeout
        // is always supplied through the Builder constructor, and connectionTimeout falls back
        // to requestTimeout in Options(Builder).
        private Options() {
            this.bufferPageSize = 8192;
            this.connectionTimeout = null;
            this.idleChannelTimeout = Duration.ZERO;
            this.idleEndpointTimeout = Duration.ofSeconds(70L);
            this.maxBufferCapacity = 8192 << 10;
            this.maxChannelsPerEndpoint = 10;
            this.maxRequestsPerChannel = 30;
            this.receiveHangDetectionTime = Duration.ofSeconds(65L);
            this.requestExpiryInterval = Duration.ofSeconds(5L);
            this.requestTimeout = null;
            this.requestTimerResolution = Duration.ofMillis(5L);
            this.sendHangDetectionTime = Duration.ofSeconds(10L);
            this.shutdownTimeout = Duration.ofSeconds(15L);
            this.userAgent = new UserAgentContainer();
        }

        // Copies all values from the builder. connectionTimeout defaults to requestTimeout when
        // the builder did not set one explicitly.
        private Options(Builder builder) {
            this.bufferPageSize = builder.bufferPageSize;
            this.idleChannelTimeout = builder.idleChannelTimeout;
            this.idleEndpointTimeout = builder.idleEndpointTimeout;
            this.maxBufferCapacity = builder.maxBufferCapacity;
            this.maxChannelsPerEndpoint = builder.maxChannelsPerEndpoint;
            this.maxRequestsPerChannel = builder.maxRequestsPerChannel;
            this.receiveHangDetectionTime = builder.receiveHangDetectionTime;
            this.requestExpiryInterval = builder.requestExpiryInterval;
            this.requestTimeout = builder.requestTimeout;
            this.requestTimerResolution = builder.requestTimerResolution;
            this.sendHangDetectionTime = builder.sendHangDetectionTime;
            this.shutdownTimeout = builder.shutdownTimeout;
            this.userAgent = builder.userAgent;

            this.connectionTimeout = builder.connectionTimeout == null
                ? builder.requestTimeout
                : builder.connectionTimeout;
        }

        /** @return the page size for pooled buffers, a power of 2 that is at least 4096. */
        public int bufferPageSize() {
            return this.bufferPageSize;
        }

        /** @return the connect timeout; defaults to {@link #requestTimeout()} when unset. */
        public Duration connectionTimeout() {
            return this.connectionTimeout;
        }

        /** @return the channel idle timeout; {@link Duration#ZERO} disables idle eviction. */
        public Duration idleChannelTimeout() {
            return this.idleChannelTimeout;
        }

        /** @return the endpoint idle timeout. */
        public Duration idleEndpointTimeout() {
            return this.idleEndpointTimeout;
        }

        /** @return the maximum pooled-buffer capacity; must be at least {@link #bufferPageSize()}. */
        public int maxBufferCapacity() {
            return this.maxBufferCapacity;
        }

        /** @return the maximum number of channels opened per endpoint. */
        public int maxChannelsPerEndpoint() {
            return this.maxChannelsPerEndpoint;
        }

        /** @return the maximum number of in-flight requests per channel. */
        public int maxRequestsPerChannel() {
            return this.maxRequestsPerChannel;
        }

        /** @return the elapsed time after which a pending receive is considered hung. */
        public Duration receiveHangDetectionTime() {
            return this.receiveHangDetectionTime;
        }

        /** @return the interval at which expired requests are reaped. */
        public Duration requestExpiryInterval() {
            return this.requestExpiryInterval;
        }

        /** @return the per-request timeout. */
        public Duration requestTimeout() {
            return this.requestTimeout;
        }

        /** @return the resolution of the timer used to expire requests. */
        public Duration requestTimerResolution() {
            return this.requestTimerResolution;
        }

        /** @return the elapsed time after which a pending send is considered hung. */
        public Duration sendHangDetectionTime() {
            return this.sendHangDetectionTime;
        }

        /** @return the time allowed for graceful shutdown. */
        public Duration shutdownTimeout() {
            return this.shutdownTimeout;
        }

        /** @return the user agent sent with requests (not part of the JSON form). */
        public UserAgentContainer userAgent() {
            return this.userAgent;
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toJson(this);
        }

        /**
         * A builder for constructing {@link Options} instances.
         *
         * <h3>Using system properties to set the default {@link Options} used by an {@link Builder}</h3>
         * <p>
         * A default options instance is created when the {@link Builder} class is initialized. This instance specifies
         * the default options used by every {@link Builder} instance. In priority order the default options instance
         * is created from:
         * <ol>
         * <li>The JSON value of system property {@code azure.cosmos.directTcp.defaultOptions}.
         * <p>Example:
         * <pre>{@code -Dazure.cosmos.directTcp.defaultOptions={\"maxChannelsPerEndpoint\":5,\"maxRequestsPerChannel\":30}}</pre>
         * </li>
         * <li>The contents of the JSON file located by system property {@code azure.cosmos.directTcp.defaultOptionsFile}.
         * <p>Example:
         * <pre>{@code -Dazure.cosmos.directTcp.defaultOptionsFile=/path/to/default/options/file}</pre>
         * </li>
         * <li>The contents of JSON resource file {@code azure.cosmos.directTcp.defaultOptions.json}.
         * <p>Specifically, the resource file is read from this stream:
         * <pre>{@code RntbdTransportClient.class.getClassLoader().getResourceAsStream("azure.cosmos.directTcp.defaultOptions.json")}</pre>
         * <p>Example: <pre>{@code {
         *   "bufferPageSize": 8192,
         *   "connectionTimeout": "PT1M",
         *   "idleChannelTimeout": "PT0S",
         *   "idleEndpointTimeout": "PT1M10S",
         *   "maxBufferCapacity": 8388608,
         *   "maxChannelsPerEndpoint": 10,
         *   "maxRequestsPerChannel": 30,
         *   "receiveHangDetectionTime": "PT1M5S",
         *   "requestExpiryInterval": "PT5S",
         *   "requestTimeout": "PT1M",
         *   "requestTimerResolution": "PT0.5S",
         *   "sendHangDetectionTime": "PT10S",
         *   "shutdownTimeout": "PT15S"
         * }}</pre>
         * </li>
         * </ol>
         * <p>JSON value errors are logged and then ignored. If none of the above values are available or all available
         * values are in error, the default options instance is created from the private parameterless constructor for
         * {@link Options}.
         */
        @SuppressWarnings("UnusedReturnValue")
        public static class Builder {

            // Base name of the system property (and derived file/resource names) that may supply
            // the default options loaded by the static initializer below.
            private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions";
            private static final Options DEFAULT_OPTIONS;

            // Resolves DEFAULT_OPTIONS once, at class-initialization time, trying in priority
            // order: (1) JSON in the system property itself, (2) the JSON file named by the
            // "...File" system property, (3) the classpath resource "<property>.json". Parse and
            // I/O errors are logged and ignored; the finally block guarantees DEFAULT_OPTIONS is
            // assigned, falling back to the hard-coded defaults in the parameterless Options
            // constructor when nothing loaded.
            static {
                Options options = null;

                try {
                    final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME);

                    if (string != null) {
                        // Priority 1: JSON value of the system property.
                        try {
                            options = RntbdObjectMapper.readValue(string, Options.class);
                        } catch (IOException error) {
                            logger.error("failed to parse default Direct TCP options {} due to ", string, error);
                        }
                    }

                    if (options == null) {
                        final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File");

                        if (path != null) {
                            // Priority 2: JSON file named by the "...File" system property.
                            try {
                                options = RntbdObjectMapper.readValue(new File(path), Options.class);
                            } catch (IOException error) {
                                logger.error("failed to load default Direct TCP options from {} due to ", path, error);
                            }
                        }
                    }

                    if (options == null) {
                        // Priority 3: JSON resource on the class path.
                        final ClassLoader loader = RntbdTransportClient.class.getClassLoader();
                        final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json";

                        try (InputStream stream = loader.getResourceAsStream(name)) {
                            if (stream != null) {
                                options = RntbdObjectMapper.readValue(stream, Options.class);
                            }
                        } catch (IOException error) {
                            logger.error("failed to load Direct TCP options from resource {} due to ", name, error);
                        }
                    }
                } finally {
                    if (options == null) {
                        // Nothing loaded: fall back to the hard-coded defaults.
                        DEFAULT_OPTIONS = new Options();
                    } else {
                        logger.info("Updated default Direct TCP options from system property {}: {}",
                            DEFAULT_OPTIONS_PROPERTY_NAME,
                            options);
                        DEFAULT_OPTIONS = options;
                    }
                }
            }

            private int bufferPageSize;
            private Duration connectionTimeout;
            private Duration idleChannelTimeout;
            private Duration idleEndpointTimeout;
            private int maxBufferCapacity;
            private int maxChannelsPerEndpoint;
            private int maxRequestsPerChannel;
            private Duration receiveHangDetectionTime;
            private Duration requestExpiryInterval;
            private Duration requestTimeout;
            private Duration requestTimerResolution;
            private Duration sendHangDetectionTime;
            private Duration shutdownTimeout;
            private UserAgentContainer userAgent;

            /**
             * Creates a builder seeded from {@code DEFAULT_OPTIONS}, with the given request
             * timeout (the one option that has no loadable default and must be supplied).
             *
             * @param requestTimeout per-request timeout; must be positive.
             */
            public Builder(Duration requestTimeout) {

                this.requestTimeout(requestTimeout);

                this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize;
                this.connectionTimeout = DEFAULT_OPTIONS.connectionTimeout;
                this.idleChannelTimeout = DEFAULT_OPTIONS.idleChannelTimeout;
                this.idleEndpointTimeout = DEFAULT_OPTIONS.idleEndpointTimeout;
                this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity;
                this.maxChannelsPerEndpoint = DEFAULT_OPTIONS.maxChannelsPerEndpoint;
                this.maxRequestsPerChannel = DEFAULT_OPTIONS.maxRequestsPerChannel;
                this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime;
                this.requestExpiryInterval = DEFAULT_OPTIONS.requestExpiryInterval;
                this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution;
                this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime;
                this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout;
                this.userAgent = DEFAULT_OPTIONS.userAgent;
            }

            /**
             * Convenience overload taking the request timeout in whole seconds.
             *
             * @param requestTimeoutInSeconds per-request timeout, in seconds; must be positive.
             */
            public Builder(int requestTimeoutInSeconds) {
                this(Duration.ofSeconds(requestTimeoutInSeconds));
            }

            /**
             * Sets the buffer page size.
             *
             * @param value a power of 2 that is at least 4096.
             * @return this builder.
             */
            public Builder bufferPageSize(final int value) {
                checkArgument(value >= 4096 && (value & (value - 1)) == 0,
                    "expected value to be a power of 2 >= 4096, not %s",
                    value);
                this.bufferPageSize = value;
                return this;
            }

            /**
             * Builds an immutable {@link Options} instance from the current builder state.
             *
             * @return the new {@link Options} instance.
             * @throws IllegalStateException if {@code bufferPageSize > maxBufferCapacity}.
             */
            public Options build() {
                checkState(this.bufferPageSize <= this.maxBufferCapacity,
                    "expected bufferPageSize (%s) <= maxBufferCapacity (%s)",
                    this.bufferPageSize,
                    this.maxBufferCapacity);
                return new Options(this);
            }

            /**
             * Sets the connect timeout; {@code null} means "fall back to the request timeout"
             * (applied in {@code Options(Builder)}).
             *
             * @param value a positive duration, or {@code null}.
             * @return this builder.
             */
            public Builder connectionTimeout(final Duration value) {
                checkArgument(value == null || value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.connectionTimeout = value;
                return this;
            }

            /**
             * Sets the channel idle timeout; {@link Duration#ZERO} is allowed (disables idle
             * eviction), hence only a null check here.
             *
             * @param value a non-null duration.
             * @return this builder.
             */
            public Builder idleChannelTimeout(final Duration value) {
                checkNotNull(value, "expected non-null value");
                this.idleChannelTimeout = value;
                return this;
            }

            /**
             * Sets the endpoint idle timeout.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder idleEndpointTimeout(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.idleEndpointTimeout = value;
                return this;
            }

            /**
             * Sets the maximum buffer capacity.
             * NOTE(review): the check also requires a power of 2 but the message only says
             * "positive value" — consider aligning the message with the predicate.
             *
             * @param value a positive power of 2.
             * @return this builder.
             */
            public Builder maxBufferCapacity(final int value) {
                checkArgument(value > 0 && (value & (value - 1)) == 0,
                    "expected positive value, not %s",
                    value);
                this.maxBufferCapacity = value;
                return this;
            }

            /**
             * Sets the maximum number of channels per endpoint.
             *
             * @param value a positive count.
             * @return this builder.
             */
            public Builder maxChannelsPerEndpoint(final int value) {
                checkArgument(value > 0, "expected positive value, not %s", value);
                this.maxChannelsPerEndpoint = value;
                return this;
            }

            /**
             * Sets the maximum number of in-flight requests per channel.
             *
             * @param value a positive count.
             * @return this builder.
             */
            public Builder maxRequestsPerChannel(final int value) {
                checkArgument(value > 0, "expected positive value, not %s", value);
                this.maxRequestsPerChannel = value;
                return this;
            }

            /**
             * Sets the receive-hang detection time.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder receiveHangDetectionTime(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.receiveHangDetectionTime = value;
                return this;
            }

            /**
             * Sets the request-expiry reaping interval.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder requestExpiryInterval(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.requestExpiryInterval = value;
                return this;
            }

            /**
             * Sets the per-request timeout.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder requestTimeout(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.requestTimeout = value;
                return this;
            }

            /**
             * Sets the request-timer resolution.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder requestTimerResolution(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.requestTimerResolution = value;
                return this;
            }

            /**
             * Sets the send-hang detection time.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder sendHangDetectionTime(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.sendHangDetectionTime = value;
                return this;
            }

            /**
             * Sets the graceful-shutdown timeout.
             *
             * @param value a positive duration.
             * @return this builder.
             */
            public Builder shutdownTimeout(final Duration value) {
                checkArgument(value != null && value.compareTo(Duration.ZERO) > 0,
                    "expected positive value, not %s",
                    value);
                this.shutdownTimeout = value;
                return this;
            }

            /**
             * Sets the user agent.
             *
             * @param value a non-null user agent container.
             * @return this builder.
             */
            public Builder userAgent(final UserAgentContainer value) {
                checkNotNull(value, "expected non-null value");
                this.userAgent = value;
                return this;
            }
        }
    }

    /**
     * Jackson serializer for {@link RntbdTransportClient}: emits the id, closed flag, the
     * endpoint provider's configuration, and the list of service endpoints with a count.
     * Registered presumably via {@code RntbdObjectMapper} (used by {@link #toString()}).
     */
    static final class JsonSerializer extends StdSerializer<RntbdTransportClient> {

        private static final long serialVersionUID = 1007663695768825670L;

        JsonSerializer() {
            super(RntbdTransportClient.class);
        }

        @Override
        public void serialize(
            final RntbdTransportClient value,
            final JsonGenerator generator,
            final SerializerProvider provider
        ) throws IOException {

            generator.writeStartObject();
            generator.writeNumberField("id", value.id());
            generator.writeBooleanField("isClosed", value.isClosed());
            generator.writeObjectField("configuration", value.endpointProvider.config());
            generator.writeObjectFieldStart("serviceEndpoints");
            generator.writeNumberField("count", value.endpointCount());
            generator.writeArrayFieldStart("items");

            for (final Iterator<RntbdEndpoint> iterator = value.endpointProvider.list().iterator(); iterator.hasNext(); ) {
                generator.writeObject(iterator.next());
            }

            generator.writeEndArray();
            generator.writeEndObject();  // closes "serviceEndpoints"
            generator.writeEndObject();  // closes the root object
        }
    }
}