comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Thanks for the debugging. The change from `Optional<String>` to `String` means that `getOdataNextLink()` now returns a string, or null if there is no `odata.nextLink` in the response. I replaced `isPresent()` with a null check in https://github.com/Azure/azure-sdk-for-java/pull/14305/commits/cc20c0521dbed11c5ffc661c23de8ed1ff65e9b4
/**
 * Loads the user groups the signed-in user is a member of, following OData paging
 * until the service stops returning an {@code odata.nextLink}.
 *
 * @param graphApiToken access token used for each Graph membership request
 * @return groups from every response page that match the configured user-group key
 * @throws IOException if a membership request or JSON deserialization fails
 */
private List<UserGroup> loadUserGroups(String graphApiToken) throws IOException {
    final List<UserGroup> lUserGroups = new ArrayList<>();
    final ObjectMapper objectMapper = JacksonObjectMapperFactory.getInstance();
    // Jdk8Module lets Jackson bind the Optional<String> odata.nextLink field.
    // NOTE(review): getInstance() appears to return a shared mapper, so this re-registers
    // the module on every call; harmless, but confirm and hoist if the mapper is shared.
    objectMapper.registerModule(new Jdk8Module());
    String responseInJson = getUserMemberships(graphApiToken, Optional.empty());
    UserGroups groupsFromJson = objectMapper.readValue(responseInJson, UserGroups.class);
    if (groupsFromJson.getValue() != null) {
        lUserGroups.addAll(groupsFromJson.getValue().stream().filter(this::isMatchingUserGroupKey)
            .collect(Collectors.toList()));
    }
    // Keep requesting pages while the service reports a continuation link.
    while (groupsFromJson.getOdataNextLink().isPresent()) {
        responseInJson = getUserMemberships(graphApiToken, groupsFromJson.getOdataNextLink());
        groupsFromJson = objectMapper.readValue(responseInJson, UserGroups.class);
        // Fix: guard later pages the same way as the first page — getValue() may be
        // null here too, which previously caused a NullPointerException.
        if (groupsFromJson.getValue() != null) {
            lUserGroups.addAll(groupsFromJson.getValue().stream().filter(this::isMatchingUserGroupKey)
                .collect(Collectors.toList()));
        }
    }
    return lUserGroups;
}
while (groupsFromJson.getOdataNextLink().isPresent()) {
/**
 * Loads the user groups the signed-in user is a member of, following OData paging
 * until {@code getOdataNextLink()} returns null (no further pages).
 *
 * @param graphApiToken access token used for each Graph membership request
 * @return groups from every response page that match the configured user-group key
 * @throws IOException if a membership request or JSON deserialization fails
 */
private List<UserGroup> loadUserGroups(String graphApiToken) throws IOException {
    String responseInJson = getUserMemberships(graphApiToken, null);
    final List<UserGroup> lUserGroups = new ArrayList<>();
    final ObjectMapper objectMapper = JacksonObjectMapperFactory.getInstance();
    UserGroups groupsFromJson = objectMapper.readValue(responseInJson, UserGroups.class);
    if (groupsFromJson.getValue() != null) {
        lUserGroups.addAll(groupsFromJson.getValue().stream().filter(this::isMatchingUserGroupKey)
            .collect(Collectors.toList()));
    }
    // Keep requesting pages while the service reports a continuation link.
    while (groupsFromJson.getOdataNextLink() != null) {
        responseInJson = getUserMemberships(graphApiToken, groupsFromJson.getOdataNextLink());
        groupsFromJson = objectMapper.readValue(responseInJson, UserGroups.class);
        // Fix: guard later pages the same way as the first page — getValue() may be
        // null here too, which previously caused a NullPointerException.
        if (groupsFromJson.getValue() != null) {
            lUserGroups.addAll(groupsFromJson.getValue().stream().filter(this::isMatchingUserGroupKey)
                .collect(Collectors.toList()));
        }
    }
    return lUserGroups;
}
class AzureADGraphClient { private static final Logger LOGGER = LoggerFactory.getLogger(AzureADGraphClient.class); private static final SimpleGrantedAuthority DEFAULT_AUTHORITY = new SimpleGrantedAuthority("ROLE_USER"); private static final String DEFAULT_ROLE_PREFIX = "ROLE_"; private static final String MICROSOFT_GRAPH_SCOPE = "https: private static final String AAD_GRAPH_API_SCOPE = "https: private static final String REQUEST_ID_SUFFIX = "aadfeed6"; private final String clientId; private final String clientSecret; private final ServiceEndpoints serviceEndpoints; private final AADAuthenticationProperties aadAuthenticationProperties; private static final String V2_VERSION_ENV_FLAG = "v2-graph"; private boolean aadMicrosoftGraphApiBool; public AzureADGraphClient(String clientId, String clientSecret, AADAuthenticationProperties aadAuthProps, ServiceEndpointsProperties serviceEndpointsProps) { this.clientId = clientId; this.clientSecret = clientSecret; this.aadAuthenticationProperties = aadAuthProps; this.serviceEndpoints = serviceEndpointsProps.getServiceEndpoints(aadAuthProps.getEnvironment()); this.initAADMicrosoftGraphApiBool(aadAuthProps.getEnvironment()); } private void initAADMicrosoftGraphApiBool(String endpointEnv) { this.aadMicrosoftGraphApiBool = endpointEnv.contains(V2_VERSION_ENV_FLAG); } private String getUserMemberships(String accessToken, Optional<String> odataNextLink) throws IOException { final URL url = new URL(serviceEndpoints.getAadMembershipRestUri()); final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); if (this.aadMicrosoftGraphApiBool) { conn.setRequestMethod(HttpMethod.GET.toString()); conn.setRequestProperty(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", accessToken)); conn.setRequestProperty(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE); conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED_VALUE); } else { conn.setRequestMethod(HttpMethod.GET.toString()); 
conn.setRequestProperty("api-version", "1.6"); conn.setRequestProperty(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", accessToken)); conn.setRequestProperty(HttpHeaders.ACCEPT, "application/json;odata=minimalmetadata"); } final String responseInJson = getResponseStringFromConn(conn); final int responseCode = conn.getResponseCode(); if (responseCode == HTTPResponse.SC_OK) { return responseInJson; } else { throw new IllegalStateException("Response is not " + HTTPResponse.SC_OK + ", response json: " + responseInJson); } } private static String getResponseStringFromConn(HttpURLConnection conn) throws IOException { try (BufferedReader reader = new BufferedReader( new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { final StringBuilder stringBuffer = new StringBuilder(); String line; while ((line = reader.readLine()) != null) { stringBuffer.append(line); } return stringBuffer.toString(); } } public List<UserGroup> getGroups(String graphApiToken) throws IOException { return loadUserGroups(graphApiToken); } /** * Checks that the JSON Node is a valid User Group to extract User Groups from * * @param node - json node to look for a key/value to equate against the * {@link AADAuthenticationProperties.UserGroupProperties} * @return true if the json node contains the correct key, and expected value to identify a user group. 
*/ private boolean isMatchingUserGroupKey(final UserGroup group) { return group.getObjectType().equals(aadAuthenticationProperties.getUserGroup().getValue()); } public Set<GrantedAuthority> getGrantedAuthorities(String graphApiToken) throws IOException { final List<UserGroup> groups = getGroups(graphApiToken); return convertGroupsToGrantedAuthorities(groups); } /** * Converts UserGroup list to Set of GrantedAuthorities * * @param groups user groups * @return granted authorities */ public Set<GrantedAuthority> convertGroupsToGrantedAuthorities(final List<UserGroup> groups) { final Set<GrantedAuthority> mappedAuthorities = groups.stream().filter(this::isValidUserGroupToGrantAuthority) .map(userGroup -> new SimpleGrantedAuthority(DEFAULT_ROLE_PREFIX + userGroup.getDisplayName())) .collect(Collectors.toCollection(LinkedHashSet::new)); if (mappedAuthorities.isEmpty()) { mappedAuthorities.add(DEFAULT_AUTHORITY); } return mappedAuthorities; } /** * Determines if this is a valid {@link UserGroup} to build to a GrantedAuthority. * <p> * If the {@link AADAuthenticationProperties.UserGroupProperties * contains the {@link UserGroup * true. * * @param group - User Group to check if valid to grant an authority to. 
* @return true if allowed-groups contains the UserGroup display name */ private boolean isValidUserGroupToGrantAuthority(final UserGroup group) { return aadAuthenticationProperties.getUserGroup().getAllowedGroups().contains(group.getDisplayName()); } public IAuthenticationResult acquireTokenForGraphApi(String idToken, String tenantId) throws ServiceUnavailableException { final IClientCredential clientCredential = ClientCredentialFactory.createFromSecret(clientSecret); final UserAssertion assertion = new UserAssertion(idToken); IAuthenticationResult result = null; ExecutorService service = null; try { service = Executors.newFixedThreadPool(1); final ConfidentialClientApplication application = ConfidentialClientApplication .builder(clientId, clientCredential) .authority(serviceEndpoints.getAadSigninUri() + tenantId + "/") .correlationId(getCorrelationId()) .build(); final Set<String> scopes = new HashSet<>(); scopes.add(aadMicrosoftGraphApiBool ? MICROSOFT_GRAPH_SCOPE : AAD_GRAPH_API_SCOPE); final OnBehalfOfParameters onBehalfOfParameters = OnBehalfOfParameters .builder(scopes, assertion) .build(); final CompletableFuture<IAuthenticationResult> future = application.acquireToken(onBehalfOfParameters); result = future.get(); } catch (ExecutionException | InterruptedException | MalformedURLException e) { final Throwable cause = e.getCause(); if (cause instanceof MsalServiceException) { final MsalServiceException exception = (MsalServiceException) cause; if (exception.claims() != null && !exception.claims().isEmpty()) { throw exception; } } LOGGER.error("acquire on behalf of token for graph api error", e); } finally { if (service != null) { service.shutdown(); } } if (result == null) { throw new ServiceUnavailableException("unable to acquire on-behalf-of token for client " + clientId); } return result; } private static String getCorrelationId() { final String uuid = UUID.randomUUID().toString(); return uuid.substring(0, uuid.length() - REQUEST_ID_SUFFIX.length()) + 
REQUEST_ID_SUFFIX; } }
class AzureADGraphClient { private static final Logger LOGGER = LoggerFactory.getLogger(AzureADGraphClient.class); private static final SimpleGrantedAuthority DEFAULT_AUTHORITY = new SimpleGrantedAuthority("ROLE_USER"); private static final String DEFAULT_ROLE_PREFIX = "ROLE_"; private static final String MICROSOFT_GRAPH_SCOPE = "https: private static final String AAD_GRAPH_API_SCOPE = "https: private static final String REQUEST_ID_SUFFIX = "aadfeed6"; private final String clientId; private final String clientSecret; private final ServiceEndpoints serviceEndpoints; private final AADAuthenticationProperties aadAuthenticationProperties; private static final String V2_VERSION_ENV_FLAG = "v2-graph"; private boolean aadMicrosoftGraphApiBool; public AzureADGraphClient(String clientId, String clientSecret, AADAuthenticationProperties aadAuthProps, ServiceEndpointsProperties serviceEndpointsProps) { this.clientId = clientId; this.clientSecret = clientSecret; this.aadAuthenticationProperties = aadAuthProps; this.serviceEndpoints = serviceEndpointsProps.getServiceEndpoints(aadAuthProps.getEnvironment()); this.initAADMicrosoftGraphApiBool(aadAuthProps.getEnvironment()); } private void initAADMicrosoftGraphApiBool(String endpointEnv) { this.aadMicrosoftGraphApiBool = endpointEnv.contains(V2_VERSION_ENV_FLAG); } private String getUserMemberships(String accessToken, String odataNextLink) throws IOException { final URL url = buildUrl(odataNextLink); final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); if (this.aadMicrosoftGraphApiBool) { conn.setRequestMethod(HttpMethod.GET.toString()); conn.setRequestProperty(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", accessToken)); conn.setRequestProperty(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE); conn.setRequestProperty(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED_VALUE); } else { conn.setRequestMethod(HttpMethod.GET.toString()); conn.setRequestProperty("api-version", "1.6"); 
conn.setRequestProperty(HttpHeaders.AUTHORIZATION, String.format("Bearer %s", accessToken)); conn.setRequestProperty(HttpHeaders.ACCEPT, "application/json;odata=minimalmetadata"); } final String responseInJson = getResponseStringFromConn(conn); final int responseCode = conn.getResponseCode(); if (responseCode == HTTPResponse.SC_OK) { return responseInJson; } else { throw new IllegalStateException("Response is not " + HTTPResponse.SC_OK + ", response json: " + responseInJson); } } private String getSkipTokenFromLink(String odataNextLink) { String[] parts = odataNextLink.split("/memberOf\\?"); return parts[1]; } private URL buildUrl(String odataNextLink) throws MalformedURLException { URL url; if (odataNextLink != null) { if (this.aadMicrosoftGraphApiBool) { url = new URL(odataNextLink); } else { String skipToken = getSkipTokenFromLink(odataNextLink); url = new URL(serviceEndpoints.getAadMembershipRestUri() + "&" + skipToken); } } else { url = new URL(serviceEndpoints.getAadMembershipRestUri()); } return url; } private static String getResponseStringFromConn(HttpURLConnection conn) throws IOException { try (BufferedReader reader = new BufferedReader( new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { final StringBuilder stringBuffer = new StringBuilder(); String line; while ((line = reader.readLine()) != null) { stringBuffer.append(line); } return stringBuffer.toString(); } } public List<UserGroup> getGroups(String graphApiToken) throws IOException { return loadUserGroups(graphApiToken); } /** * Checks that the UserGroup has a Group object type. * * @param node - json node to look for a key/value to equate against the * {@link AADAuthenticationProperties.UserGroupProperties} * @return true if the json node contains the correct key, and expected value to identify a user group. 
*/ private boolean isMatchingUserGroupKey(final UserGroup group) { return group.getObjectType().equals(aadAuthenticationProperties.getUserGroup().getValue()); } public Set<GrantedAuthority> getGrantedAuthorities(String graphApiToken) throws IOException { final List<UserGroup> groups = getGroups(graphApiToken); return convertGroupsToGrantedAuthorities(groups); } /** * Converts UserGroup list to Set of GrantedAuthorities * * @param groups user groups * @return granted authorities */ public Set<GrantedAuthority> convertGroupsToGrantedAuthorities(final List<UserGroup> groups) { final Set<GrantedAuthority> mappedAuthorities = groups.stream().filter(this::isValidUserGroupToGrantAuthority) .map(userGroup -> new SimpleGrantedAuthority(DEFAULT_ROLE_PREFIX + userGroup.getDisplayName())) .collect(Collectors.toCollection(LinkedHashSet::new)); if (mappedAuthorities.isEmpty()) { mappedAuthorities.add(DEFAULT_AUTHORITY); } return mappedAuthorities; } /** * Determines if this is a valid {@link UserGroup} to build to a GrantedAuthority. * <p> * If the {@link AADAuthenticationProperties.UserGroupProperties * contains the {@link UserGroup * true. * * @param group - User Group to check if valid to grant an authority to. 
* @return true if allowed-groups contains the UserGroup display name */ private boolean isValidUserGroupToGrantAuthority(final UserGroup group) { return aadAuthenticationProperties.getUserGroup().getAllowedGroups().contains(group.getDisplayName()); } public IAuthenticationResult acquireTokenForGraphApi(String idToken, String tenantId) throws ServiceUnavailableException { final IClientCredential clientCredential = ClientCredentialFactory.createFromSecret(clientSecret); final UserAssertion assertion = new UserAssertion(idToken); IAuthenticationResult result = null; ExecutorService service = null; try { service = Executors.newFixedThreadPool(1); final ConfidentialClientApplication application = ConfidentialClientApplication .builder(clientId, clientCredential) .authority(serviceEndpoints.getAadSigninUri() + tenantId + "/") .correlationId(getCorrelationId()) .build(); final Set<String> scopes = new HashSet<>(); scopes.add(aadMicrosoftGraphApiBool ? MICROSOFT_GRAPH_SCOPE : AAD_GRAPH_API_SCOPE); final OnBehalfOfParameters onBehalfOfParameters = OnBehalfOfParameters .builder(scopes, assertion) .build(); final CompletableFuture<IAuthenticationResult> future = application.acquireToken(onBehalfOfParameters); result = future.get(); } catch (ExecutionException | InterruptedException | MalformedURLException e) { final Throwable cause = e.getCause(); if (cause instanceof MsalServiceException) { final MsalServiceException exception = (MsalServiceException) cause; if (exception.claims() != null && !exception.claims().isEmpty()) { throw exception; } } LOGGER.error("acquire on behalf of token for graph api error", e); } finally { if (service != null) { service.shutdown(); } } if (result == null) { throw new ServiceUnavailableException("unable to acquire on-behalf-of token for client " + clientId); } return result; } private static String getCorrelationId() { final String uuid = UUID.randomUUID().toString(); return uuid.substring(0, uuid.length() - REQUEST_ID_SUFFIX.length()) + 
REQUEST_ID_SUFFIX; } }
This fix is not related to the empty id; it is a quick fix to make sure the async and sync tests are verifying the same thing. Previously they were verifying the error.
/**
 * Verifies that for a batch input containing a single bad document, the call still
 * completes successfully and the errored result in the returned collection throws a
 * {@code TextAnalyticsException} (with the batch-error message) when its entities
 * are accessed.
 */
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchCategorizedEntitySingleErrorRunner(inputs -> {
        // Expected message for an errored RecognizeEntitiesResult in a batch response.
        final String expectedMessage = String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult");
        StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> response.getValue().forEach(result -> {
                final Exception thrown = assertThrows(TextAnalyticsException.class, result::getEntities);
                assertEquals(expectedMessage, thrown.getMessage());
            }))
            .verifyComplete();
    });
}
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
/**
 * Verifies that for a batch input containing a single bad document, the call still
 * completes successfully and each errored RecognizeEntitiesResult in the returned
 * collection throws a TextAnalyticsException — with the batch-error message — when
 * its entities are accessed.
 */
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) 
.verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); 
recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> 
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) 
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils 
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } /** * Test analyzing sentiment for a string input. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils 
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) 
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> 
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); 
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, 
httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> 
validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, 
httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
This method is duplicated here for the same reason as the review comment at https://github.com/Azure/azure-sdk-for-java/pull/14324/files#r474852892.
/**
 * Verifies that when a batch PII-recognition request contains a single erroneous document,
 * the per-document failure surfaces as a {@code TextAnalyticsException} (with the standard
 * batch-error message) only when the caller accesses that document's entities.
 */
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchPiiEntitySingleErrorRunner(documents ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(documents, null))
            .assertNext(response -> response.getValue().forEach(piiResult -> {
                // Accessing entities of the failed document must throw, carrying the
                // formatted batch-error message for this result type.
                final String expectedMessage =
                    String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult");
                Exception thrown = assertThrows(TextAnalyticsException.class, piiResult::getEntities);
                assertEquals(expectedMessage, thrown.getMessage());
            }))
            .verifyComplete());
}
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
// Verifies that a single erroneous document in a PII batch produces a TextAnalyticsException
// (with the standard batch-error message) when that document's entities are accessed.
// NOTE(review): this is a byte-identical duplicate of the method above in this chunk —
// presumably an extraction artifact; confirm against the original file before merging.
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> 
StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); 
recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) 
@MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> 
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } /** * Test analyzing sentiment for a string input. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, 
null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; 
assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, 
options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, 
httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
It seems like, from [here](https://github.com/Azure/azure-sdk-for-java/pull/14324/files#diff-f8937ef2df39148b27c109212931fa8aR18), we are getting non-null values. What exactly is missing?
/**
 * Verifies that a detect-language batch request containing a document with an empty ID
 * is rejected by the service with an HTTP 400, and that the error body deserializes to
 * a {@code TextAnalyticsError}.
 *
 * @param httpClient the HTTP client to run the test against
 * @param serviceVersion the Text Analytics service version to target
 */
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    detectLanguageInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                // The original test assigned the deserialized error body but never verified it,
                // so a broken error-body deserialization would go unnoticed. Assert it is present.
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertTrue(textAnalyticsError != null);
            }));
}
/**
 * Tests that the service responds with HTTP 400 (Bad Request) when a detect-language
 * batch contains a document whose ID is empty.
 *
 * @param httpClient the HTTP client to run the test against
 * @param serviceVersion the Text Analytics service version to target
 */
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    detectLanguageInputEmptyIdRunner(documents -> StepVerifier
        .create(client.detectLanguageBatchWithResponse(documents, null))
        .verifyErrorSatisfies(error -> {
            final HttpResponseException responseException = (HttpResponseException) error;
            assertEquals(400, responseException.getResponse().getStatusCode());
            // Cast mirrors the original test; it would throw ClassCastException if the
            // error body were not a TextAnalyticsError. NOTE(review): the value itself is
            // never asserted on — consider verifying it is non-null.
            final TextAnalyticsError analyticsError = (TextAnalyticsError) responseException.getValue();
        }));
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils /** * Verify that with countryHint with empty string will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), 
exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && 
INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { 
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); 
recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) 
@MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> 
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } /** * Test analyzing sentiment for a string input. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/
    // NOTE(review): the @MethodSource values below were truncated in the extracted source; restored to
    // "com.azure.ai.textanalytics.TestUtils#getTestParameters" -- confirm against TestUtils.
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchStringSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
                .assertNext(response -> validateSentimentResultCollection(true,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a batch of documents.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(false,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }

    /**
     * Verify that we can get statistics on the collection result when given a batch of documents with options.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(true,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils /** * Verify that with countryHint with empty string will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), 
exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> 
StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; 
assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, 
options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, 
httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
The JSON output is the same as the Postman output. Java deserialization doesn't parse the "error" object in this case. More detail can be found here: https://teams.microsoft.com/l/message/19:d6ff4003f5c848a2a2d6dcdfc0ebd497@thread.skype/1597103339389?tenantId=72f988bf-86f1-41af-91ab-2d7cd011db47&groupId=3e17dcb0-4257-4a30-b843-77f47f1d4121&parentMessageId=1597103339389&teamName=Azure%20SDK&channelName=Service%20-%20Text%20Analytics&createdTime=1597103339389
/**
 * Verifies that the service returns a 400 Bad Request when the input batch contains a
 * document with an empty ID.
 *
 * <p>Only the HTTP status code is asserted: the service's "error" object is not deserialized
 * into the {@link HttpResponseException} value in this case (see the note above this method),
 * so there is nothing further to inspect on the exception value. The previously-declared
 * {@code TextAnalyticsError} local was assigned but never used and has been removed.
 */
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    detectLanguageInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                // An empty document ID is rejected by the service as a bad request.
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
            }));
}
// NOTE(review): this method is byte-for-byte identical to the detectLanguageEmptyIdInput
// directly above it — two methods with the same signature in one class will not compile.
// This looks like a copy/paste or extraction artifact; confirm against the original file
// and remove one of the two copies. The unused local `textAnalyticsError` (assigned from
// the cast but never asserted) is intentional per the note above: the "error" object is
// not deserialized into the exception value, so only the status code can be checked.
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils /** * Verify that with countryHint with empty string will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext()))
            .verifyComplete());
    }

    // NOTE(review): every @MethodSource value in this file arrived truncated as
    // "com.azure.ai.textanalytics.TestUtils (unterminated string). They are reconstructed
    // below as the suite-wide TestUtils#getTestParameters provider — confirm against TestUtils.

    /** Verifies a batch with duplicate document IDs is rejected with an {@link HttpResponseException}. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeCategorizedEntityDuplicateIdRunner(inputs ->
            StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Verifies a 400 response when a batch document has an empty ID. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): extracted but never asserted on — an error-code assertion
                    // appears to have been lost from the original source; confirm.
                    TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Verifies accessing entities of an errored document throws a {@link TextAnalyticsException}. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchCategorizedEntitySingleErrorRunner((inputs) ->
            StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
                .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> {
                    Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
                    assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"),
                        exception.getMessage());
                })).verifyComplete());
    }

    /** Verifies entity recognition for a batch of documents. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchCategorizedEntityRunner((inputs) ->
            StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
                .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false,
                    getExpectedBatchCategorizedEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies statistics are returned for a batch entity-recognition request with options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options))
                .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true,
                    getExpectedBatchCategorizedEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies entity recognition for a batch of plain strings. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeCategorizedEntityStringInputRunner((inputs) ->
            StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
                .assertNext(response -> validateCategorizedEntitiesResultCollection(false,
                    getExpectedBatchCategorizedEntities(), response))
                .verifyComplete());
    }

    /** Verifies entity recognition for a batch of strings with a language hint. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null))
                .assertNext(response -> validateCategorizedEntitiesResultCollection(false,
                    getExpectedBatchCategorizedEntities(), response))
                .verifyComplete());
    }

    /** Verifies entity recognition for a batch of strings with request options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options))
                .assertNext(response -> validateCategorizedEntitiesResultCollection(true,
                    getExpectedBatchCategorizedEntities(), response))
                .verifyComplete());
    }

    /** Verifies PII entity recognition for a single document. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizePiiSingleDocumentRunner(document ->
            StepVerifier.create(client.recognizePiiEntities(document))
                .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(),
                    response.stream().collect(Collectors.toList())))
                .verifyComplete());
    }

    /** Verifies an empty document raises a {@link TextAnalyticsException} for PII recognition. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        emptyTextRunner(document ->
            StepVerifier.create(client.recognizePiiEntities(document))
                .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                    && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage()))
                .verify());
    }

    /** Verifies a faulty document yields a warning-free PII result. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        faultyTextRunner(document ->
            StepVerifier.create(client.recognizePiiEntities(document))
                .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
                .verifyComplete());
    }

    /** Verifies duplicate document IDs are rejected for batch PII recognition. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchPiiEntityDuplicateIdRunner(inputs ->
            StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Verifies a 400 response when a PII batch document has an empty ID. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): extracted but never asserted on — confirm intended assertion.
                    TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Verifies accessing entities of an errored PII document throws a {@link TextAnalyticsException}. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
            StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
                .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
                    Exception exception = assertThrows(TextAnalyticsException.class,
                        recognizePiiEntitiesResult::getEntities);
                    assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"),
                        exception.getMessage());
                })).verifyComplete());
    }

    /** Verifies PII recognition for a batch of documents. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchPiiEntitiesRunner((inputs) ->
            StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
                .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false,
                    getExpectedBatchPiiEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies statistics are returned for batch PII recognition with options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
                .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true,
                    getExpectedBatchPiiEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies PII recognition for a batch of strings with a language hint. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizePiiLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
                .assertNext(response -> validatePiiEntitiesResultCollection(false,
                    getExpectedBatchPiiEntities(), response))
                .verifyComplete());
    }

    /** Verifies PII recognition for a batch of strings with request options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
                .assertNext(response -> validatePiiEntitiesResultCollection(true,
                    getExpectedBatchPiiEntities(), response))
                .verifyComplete());
    }

    /** Verifies linked-entity recognition for a single document. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeLinkedEntitiesForSingleTextInputRunner(input ->
            StepVerifier.create(client.recognizeLinkedEntities(input))
                .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0),
                    response.iterator().next()))
                .verifyComplete());
    }

    /** Verifies an empty document raises a {@link TextAnalyticsException} for linked-entity recognition. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        emptyTextRunner(input ->
            StepVerifier.create(client.recognizeLinkedEntities(input))
                .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                    && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
                .verify());
    }

    /** Verifies a faulty document yields a warning-free linked-entity result. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        faultyTextRunner(input ->
            StepVerifier.create(client.recognizeLinkedEntities(input))
                .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
                .verifyComplete());
    }

    /** Verifies duplicate document IDs are rejected for batch linked-entity recognition. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchLinkedEntityDuplicateIdRunner(inputs ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Verifies a 400 response when a linked-entity batch document has an empty ID. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): extracted but never asserted on — confirm intended assertion.
                    TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Verifies linked-entity recognition for a batch of documents. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchLinkedEntityRunner((inputs) ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
                .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
                    getExpectedBatchLinkedEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies statistics are returned for batch linked-entity recognition with options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
                .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true,
                    getExpectedBatchLinkedEntities(), 200, response))
                .verifyComplete());
    }

    /** Verifies linked-entity recognition for a batch of plain strings. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeLinkedStringInputRunner((inputs) ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
                .assertNext(response -> validateLinkedEntitiesResultCollection(false,
                    getExpectedBatchLinkedEntities(), response))
                .verifyComplete());
    }

    /** Verifies linked-entity recognition for a batch of strings with a language hint. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeLinkedLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
                .assertNext(response -> validateLinkedEntitiesResultCollection(false,
                    getExpectedBatchLinkedEntities(), response))
                .verifyComplete());
    }

    /** Verifies linked-entity recognition for a batch of strings with request options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
                .assertNext(response -> validateLinkedEntitiesResultCollection(true,
                    getExpectedBatchLinkedEntities(), response))
                .verifyComplete());
    }

    /** Verifies key-phrase extraction for a single document. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesForSingleTextInputRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(response -> assertEquals("monde", response.iterator().next()))
                .verifyComplete());
    }

    /** Verifies an empty document raises a {@link TextAnalyticsException} for key-phrase extraction. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        emptyTextRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                    && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
                .verify());
    }

    /** Verifies a faulty document yields a warning-free key-phrase result. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        faultyTextRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
                .verifyComplete());
    }

    /** Verifies duplicate document IDs are rejected for batch key-phrase extraction. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesDuplicateIdRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Verifies a 400 response when a key-phrase batch document has an empty ID. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): extracted but never asserted on — confirm intended assertion.
                    TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Verifies key-phrase extraction for a batch of documents. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesRunner((inputs) ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false,
                    getExpectedBatchKeyPhrases(), 200, response))
                .verifyComplete());
    }

    /** Verifies statistics are returned for batch key-phrase extraction with options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
                .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true,
                    getExpectedBatchKeyPhrases(), 200, response))
                .verifyComplete());
    }

    /** Verifies key-phrase extraction for a batch of plain strings. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesStringInputRunner((inputs) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** Verifies key-phrase extraction for a batch of strings with a language hint. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** Verifies key-phrase extraction for a batch of strings with request options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(true,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** Verifies the long-document warning surfaces on a single key-phrase result. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesWarningRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(keyPhrasesResult -> {
                    keyPhrasesResult.getWarnings().forEach(warning -> {
                        assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                        assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
                    });
                })
                .verifyComplete());
    }

    /** Verifies the long-document warning surfaces on every result of a key-phrase batch. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesBatchWarningRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .assertNext(response -> response.getValue().forEach(keyPhrasesResult ->
                    keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> {
                        assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                        assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
                    })))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a string input.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        // Confidence scores are zeroed; the validator only compares sentiment labels.
        final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED,
            new SentimentConfidenceScores(0.0, 0.0, 0.0),
            new IterableStream<>(Arrays.asList(
                new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
                new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0))
            )), null);
        StepVerifier
            .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi."))
            .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
            .verifyComplete();
    }

    /**
     * Verifies that an TextAnalyticsException is thrown for an empty document.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        StepVerifier.create(client.analyzeSentiment(""))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
            .verify();
    }

    /**
     * Test analyzing sentiment for a faulty document.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(
            TextSentiment.NEUTRAL,
            new SentimentConfidenceScores(0.0, 0.0, 0.0),
            new IterableStream<>(Arrays.asList(
                new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
                new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0))
            )), null);
        // NOTE(review): the input literal was truncated to "!@ in the damaged source;
        // reconstructed as the faulty-text fixture "!@#%%" used by this test suite — confirm.
        StepVerifier.create(client.analyzeSentiment("!@#%%"))
            .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
            .verifyComplete();
    }

    /**
     * Test analyzing sentiment for a duplicate ID list.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentDuplicateIdRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /**
     * Verifies that an invalid document exception is returned for input documents with an empty ID.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): extracted but never asserted on — confirm intended assertion.
                    TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /**
     * Test analyzing sentiment for a list of string input.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeSentimentStringInputRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
                .assertNext(response -> validateSentimentResultCollection(false,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a list of string input with language code.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeSentimentLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null))
                .assertNext(response -> validateSentimentResultCollection(false,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a list of string input with request options.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchStringSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
                .assertNext(response -> validateSentimentResultCollection(true,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /**
     * Test analyzing sentiment for a batch of documents.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(false,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }

    /**
     * Verify that we can get statistics on the collection result when given a batch of documents with options.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(true,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils /** * Verify that with countryHint with empty string will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), 
exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiSingleDocumentRunner(document -> 
StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; 
assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = 
getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, 
options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> 
assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> 
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, 
httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
Just to clarify: are you going to start asserting the error code and message once you regenerate with the swagger fix?
// Verifies that submitting more documents than the service allows to the
// entity-recognition batch endpoint fails with an HTTP 400 response.
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    // tooManyDocumentsRunner supplies an over-sized document batch (defined in the test base class).
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                // NOTE(review): textAnalyticsError is currently unused — the cast only checks the
                // payload type. TODO: assert the error code and message once the swagger fix is
                // regenerated (see the accompanying review comment).
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}
/**
 * Verifies that a batch with more documents than the service limit is rejected:
 * the async recognize-entities call must error with an HTTP 400 response whose
 * body deserializes to a {@code TextAnalyticsError}.
 */
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    tooManyDocumentsRunner(documents -> {
        StepVerifier.create(client.recognizeEntitiesBatch(documents, null, null))
            .verifyErrorSatisfies(throwable -> {
                // The service surfaces the batch-size violation as an HttpResponseException.
                HttpResponseException responseException = (HttpResponseException) throwable;
                assertEquals(400, responseException.getResponse().getStatusCode());
                // Payload type check only; error code/message assertions are deferred.
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) responseException.getValue();
            });
    });
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion 
serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    textAnalyticsInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                // The cast itself asserts that the error payload is a TextAnalyticsError.
                // NOTE(review): the original may also have asserted on the error code -- confirm upstream.
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

// NOTE(review): every @MethodSource value in this copy was truncated to
// "com.azure.ai.textanalytics.TestUtils"; restored to the "#getTestParameters"
// factory used throughout these tests -- confirm against upstream.

// Verifies each per-document error in a batch surfaces as a TextAnalyticsException on access.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
                Exception exception = assertThrows(TextAnalyticsException.class,
                    recognizePiiEntitiesResult::getEntities);
                assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"),
                    exception.getMessage());
            })).verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchPiiEntitiesRunner((inputs) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false,
                getExpectedBatchPiiEntities(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
            .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true,
                getExpectedBatchPiiEntities(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizePiiLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
            .assertNext(response -> validatePiiEntitiesResultCollection(false,
                getExpectedBatchPiiEntities(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
            .assertNext(response -> validatePiiEntitiesResultCollection(true,
                getExpectedBatchPiiEntities(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeLinkedEntitiesForSingleTextInputRunner(input ->
        StepVerifier.create(client.recognizeLinkedEntities(input))
            .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0),
                response.iterator().next()))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    emptyTextRunner(input ->
        StepVerifier.create(client.recognizeLinkedEntities(input))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
            .verify());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    faultyTextRunner(input ->
        StepVerifier.create(client.recognizeLinkedEntities(input))
            .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchLinkedEntityDuplicateIdRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    textAnalyticsInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchLinkedEntityRunner((inputs) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
                getExpectedBatchLinkedEntities(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
            .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true,
                getExpectedBatchLinkedEntities(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeLinkedStringInputRunner((inputs) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
            .assertNext(response -> validateLinkedEntitiesResultCollection(false,
                getExpectedBatchLinkedEntities(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeLinkedLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
            .assertNext(response -> validateLinkedEntitiesResultCollection(false,
                getExpectedBatchLinkedEntities(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
            .assertNext(response -> validateLinkedEntitiesResultCollection(true,
                getExpectedBatchLinkedEntities(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractKeyPhrasesForSingleTextInputRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .assertNext(response -> assertEquals("monde", response.iterator().next()))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    emptyTextRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
            .verify());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    faultyTextRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractBatchKeyPhrasesDuplicateIdRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    textAnalyticsInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractBatchKeyPhrasesRunner((inputs) ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false,
                getExpectedBatchKeyPhrases(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
            .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true,
                getExpectedBatchKeyPhrases(), 200, response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractKeyPhrasesStringInputRunner((inputs) ->
        StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
            .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                getExpectedBatchKeyPhrases(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractKeyPhrasesLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
            .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                getExpectedBatchKeyPhrases(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
            .assertNext(response -> validateExtractKeyPhrasesResultCollection(true,
                getExpectedBatchKeyPhrases(), response))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractKeyPhrasesWarningRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .assertNext(keyPhrasesResult -> keyPhrasesResult.getWarnings().forEach(warning -> {
                assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
            }))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    extractKeyPhrasesBatchWarningRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .assertNext(response -> response.getValue().forEach(keyPhrasesResult ->
                keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> {
                    assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                    assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
                })))
            .verifyComplete());
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

/**
 * Test analyzing sentiment for a string input.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED,
        new SentimentConfidenceScores(0.0, 0.0, 0.0),
        new IterableStream<>(Arrays.asList(
            new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
            new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0))
        )), null);
    StepVerifier
        .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi."))
        .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
        .verifyComplete();
}

/**
 * Verifies that an TextAnalyticsException is thrown for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(client.analyzeSentiment(""))
        .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
            && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
        .verify();
}

/**
 * Test analyzing sentiment for a faulty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.NEUTRAL,
        new SentimentConfidenceScores(0.0, 0.0, 0.0),
        new IterableStream<>(Arrays.asList(
            new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
            new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0))
        )), null);
    // NOTE(review): the faulty-text literal was truncated to "!@ in this copy; restored to the
    // "!@#%%" input used by the sibling faultyTextRunner tests -- confirm against upstream.
    StepVerifier.create(client.analyzeSentiment("!@#%%"))
        .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
        .verifyComplete();
}

/**
 * Test analyzing sentiment for a duplicate ID list.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeBatchSentimentDuplicateIdRunner(inputs ->
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
}

/**
 * Verifies that an invalid document exception is returned for input documents with an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    textAnalyticsInputEmptyIdRunner(inputs ->
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}

/**
 * Test analyzing sentiment for a list of string input.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForBatchStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeSentimentStringInputRunner(inputs ->
        StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
            .assertNext(response -> validateSentimentResultCollection(false,
                getExpectedBatchTextSentiment(), response))
            .verifyComplete());
}

/**
 * Test analyzing sentiment for a list of string input with language code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForListLanguageHint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeSentimentLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null))
            .assertNext(response -> validateSentimentResultCollection(false,
                getExpectedBatchTextSentiment(), response))
            .verifyComplete());
}

/**
 * Test analyzing sentiment for a list of string input with request options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForListStringWithOptions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeBatchStringSentimentShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
            .assertNext(response -> validateSentimentResultCollection(true,
                getExpectedBatchTextSentiment(), response))
            .verifyComplete());
}

/**
 * Test analyzing sentiment for a batch of documents.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeBatchSentimentRunner(inputs ->
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
            .assertNext(response -> validateSentimentResultCollectionWithResponse(false,
                getExpectedBatchTextSentiment(), 200, response))
            .verifyComplete());
}

/**
 * Verify that we can get statistics on the collection result when given a batch of documents with options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    analyzeBatchSentimentShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
            .assertNext(response -> validateSentimentResultCollectionWithResponse(true,
                getExpectedBatchTextSentiment(), 200, response))
            .verifyComplete());
}

/**
 * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
            .verifyErrorSatisfies(ex -> {
                HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
            }));
}
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion 
serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { 
HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> 
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> 
StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) 
@MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) 
-> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) 
.verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
I will only assert the error code. Johan's comments in Cognitive Scrum chat: ""Note: we should not (in the general case) be asserting on error messages. I would not consider localizing error messages to be a breaking change, for example..."" and "Yes. I assume that we don't have client library dependencies on the message itself (it's been done in the past, and it has not ended well)"
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); }
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); }
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion 
serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { 
HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> 
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS)
    // NOTE(review): every @MethodSource string literal in this region was cut off
    // mid-literal by extraction ("com.azure.ai.textanalytics.TestUtils with no
    // closing quote). Restored below to
    // "com.azure.ai.textanalytics.TestUtils#getTestParameters" — confirm against
    // the upstream file.
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        // Over-sized linked-entity batches are rejected by the service with HTTP 400.
        tooManyDocumentsRunner(inputs ->
            StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // NOTE(review): local is assigned but never asserted on; the
                    // upstream test presumably checks the error code — extraction may
                    // have dropped that assertion. Same pattern recurs below.
                    TextAnalyticsError textAnalyticsError =
                        (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Extracts key phrases for a single document; expects the phrase "monde". */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForTextInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesForSingleTextInputRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(response -> assertEquals("monde", response.iterator().next()))
                .verifyComplete());
    }

    /** Verifies a TextAnalyticsException is raised for an empty document. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForEmptyText(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        emptyTextRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                    && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
                .verify());
    }

    /** Faulty text still succeeds and produces no warnings. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForFaultyText(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        faultyTextRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext()))
                .verifyComplete());
    }

    /** Duplicate document IDs in a batch cause an HttpResponseException. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesDuplicateIdRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Empty document IDs in a batch cause an HTTP 400. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // (see note above re: unused local)
                    TextAnalyticsError textAnalyticsError =
                        (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Key-phrase extraction for a batch of TextDocumentInput. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesRunner((inputs) ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false,
                    getExpectedBatchKeyPhrases(), 200, response))
                .verifyComplete());
    }

    /** Batch key-phrase extraction with statistics requested via options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
                .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true,
                    getExpectedBatchKeyPhrases(), 200, response))
                .verifyComplete());
    }

    /** Key-phrase extraction for a batch of plain strings. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesStringInputRunner((inputs) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** Key-phrase extraction for a batch of strings with a language hint. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(false,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** Key-phrase extraction for a batch of strings with request options (statistics on). */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
                .assertNext(response -> validateExtractKeyPhrasesResultCollection(true,
                    getExpectedBatchKeyPhrases(), response))
                .verifyComplete());
    }

    /** A too-long document yields a long-words-in-document warning on the single result. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesWarning(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesWarningRunner(input ->
            StepVerifier.create(client.extractKeyPhrases(input))
                .assertNext(keyPhrasesResult -> {
                    keyPhrasesResult.getWarnings().forEach(warning -> {
                        assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                        assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
                    });
                })
                .verifyComplete()
        );
    }

    /** Same warning check, but across every document in a batch response. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesBatchWarning(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        extractKeyPhrasesBatchWarningRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
                .assertNext(response -> response.getValue().forEach(keyPhrasesResult ->
                    keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> {
                        assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage()));
                        assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode()));
                    })
                ))
                .verifyComplete()
        );
    }

    /** Over-sized key-phrase batches are rejected with HTTP 400. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        tooManyDocumentsRunner(inputs ->
            StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // (see note above re: unused local)
                    TextAnalyticsError textAnalyticsError =
                        (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /**
     * Test analyzing sentiment for a string input; the document mixes negative and
     * positive sentences, so the overall sentiment is MIXED.
     */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForTextInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(
            TextSentiment.MIXED,
            new SentimentConfidenceScores(0.0, 0.0, 0.0),
            new IterableStream<>(Arrays.asList(
                new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
                new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0))
            )), null);
        StepVerifier
            .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi."))
            .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
            .verifyComplete();
    }

    /** Verifies that a TextAnalyticsException is thrown for an empty document. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForEmptyText(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        StepVerifier.create(client.analyzeSentiment(""))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE))
            .verify();
    }

    /** Test analyzing sentiment for a faulty document (expects NEUTRAL). */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForFaultyText(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(
            TextSentiment.NEUTRAL,
            new SentimentConfidenceScores(0.0, 0.0, 0.0),
            new IterableStream<>(Arrays.asList(
                new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)),
                new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0))
            )), null);
        // NOTE(review): the faulty-text literal was truncated by extraction at "!@;
        // restored to the conventional faulty-text fixture "!@#%%" — confirm upstream.
        StepVerifier.create(client.analyzeSentiment("!@#%%"))
            .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response))
            .verifyComplete();
    }

    /** Test analyzing sentiment for a duplicate ID list. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentDuplicateIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentDuplicateIdRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass())));
    }

    /** Verifies that an invalid document exception is returned for inputs with an empty ID. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentEmptyIdInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        textAnalyticsInputEmptyIdRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // (see note above re: unused local)
                    TextAnalyticsError textAnalyticsError =
                        (TextAnalyticsError) httpResponseException.getValue();
                }));
    }

    /** Test analyzing sentiment for a list of string input. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchStringInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeSentimentStringInputRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
                .assertNext(response -> validateSentimentResultCollection(false,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /** Test analyzing sentiment for a list of string input with a language code. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForListLanguageHint(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeSentimentLanguageHintRunner((inputs, language) ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null))
                .assertNext(response -> validateSentimentResultCollection(false,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /** Test analyzing sentiment for a list of string input with request options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForListStringWithOptions(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchStringSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
                .assertNext(response -> validateSentimentResultCollection(true,
                    getExpectedBatchTextSentiment(), response))
                .verifyComplete());
    }

    /** Test analyzing sentiment for a batch of documents. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInput(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(false,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }

    /** Verify statistics on the collection result when given a batch with options. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        analyzeBatchSentimentShowStatsRunner((inputs, options) ->
            StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
                .assertNext(response -> validateSentimentResultCollectionWithResponse(true,
                    getExpectedBatchTextSentiment(), 200, response))
                .verifyComplete());
    }

    /** Over-sized sentiment batches are rejected with HTTP 400. */
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient,
        TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion);
        tooManyDocumentsRunner(inputs ->
            StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
                .verifyErrorSatisfies(ex -> {
                    HttpResponseException httpResponseException = (HttpResponseException) ex;
                    assertEquals(400, httpResponseException.getResponse().getStatusCode());
                    // (see note above re: unused local)
                    TextAnalyticsError textAnalyticsError =
                        (TextAnalyticsError) httpResponseException.getValue();
                }));
    }
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase { private TextAnalyticsAsyncClient client; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { return getTextAnalyticsAsyncClientBuilder(httpClient, serviceVersion).buildAsyncClient(); } /** * Verify that we can get statistics on the collection result when given a batch of documents with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageShowStatisticsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(), 200, response) ) .verifyComplete()); } /** * Test to detect language for each {@code DetectLanguageResult} input of a batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageRunner((inputs) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .assertNext(response -> validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(), 200, response)) .verifyComplete()); } /** * Test to detect language for each string input of batch with given country hint. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesCountryHintRunner((inputs, countryHint) -> StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null)) .assertNext(actualResults -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults)) .verifyComplete()); } /** * Test to detect language for each string input of batch with request options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, options)) .assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Test to detect language for each string input of batch. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageStringInputRunner((inputs) -> StepVerifier.create(client.detectLanguageBatch(inputs, null, null)) .assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response)) .verifyComplete()); } /** * Verifies that a single DetectedLanguage is returned for a document to detectLanguage. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectSingleTextLanguageRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response)) .verifyComplete()); } /** * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInvalidCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_COUNTRY_HINT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } /** * Verifies that detectLanguage returns an "UNKNOWN" result when faulty text is passed. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.detectLanguage(input)) .assertNext(response -> validatePrimaryLanguage(getUnknownDetectedLanguage(), response)) .verifyComplete()); } /** * Verifies that a bad request exception is returned for input documents with same ids. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageDuplicateIdRunner((inputs, options) -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageInputEmptyIdRunner(inputs -> StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Verify that with countryHint with empty string will not throw exception. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageEmptyCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } /** * Verify that with countryHint with "none" will not throw exception. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); detectLanguageNoneCountryHintRunner((input, countryHint) -> StepVerifier.create(client.detectLanguage(input, countryHint)) .assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(response -> validateCategorizedEntities(getCategorizedEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> 
StepVerifier.create(client.recognizeEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntityRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void 
recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntityStringInputRunner((inputs) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null)) .assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options)) .assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, 
serviceVersion); recognizePiiSingleDocumentRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList()))) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE.equals(throwable.getMessage())) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion 
serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitySingleErrorRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> { Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities); assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage()); })).verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesRunner((inputs) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient 
httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizePiiLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null)) .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options)) .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { 
HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedEntitiesForSingleTextInputRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> StepVerifier.create(client.recognizeLinkedEntities(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, 
TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityDuplicateIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntityRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) -> 
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options)) .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedStringInputRunner((inputs) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeLinkedLanguageHintRunner((inputs, language) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null)) .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options)) .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response)) .verifyComplete()); } @ParameterizedTest(name = 
DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesForSingleTextInputRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(response -> assertEquals("monde", response.iterator().next())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); emptyTextRunner(input -> StepVerifier.create(client.extractKeyPhrases(input)) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); faultyTextRunner(input -> 
StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(result -> assertFalse(result.getWarnings().iterator().hasNext())) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesDuplicateIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) 
@MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchKeyPhrasesShowStatsRunner((inputs, options) -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesStringInputRunner((inputs) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesLanguageHintRunner((inputs, language) -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) 
-> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options)) .assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response)) .verifyComplete()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesWarningRunner( input -> StepVerifier.create(client.extractKeyPhrases(input)) .assertNext(keyPhrasesResult -> { keyPhrasesResult.getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }); }) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchWarning(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); extractKeyPhrasesBatchWarningRunner( inputs -> StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null)) .assertNext(response -> response.getValue().forEach(keyPhrasesResult -> keyPhrasesResult.getKeyPhrases().getWarnings().forEach(warning -> { assertTrue(WARNING_TOO_LONG_DOCUMENT_INPUT_MESSAGE.equals(warning.getMessage())); assertTrue(LONG_WORDS_IN_DOCUMENT.equals(warning.getWarningCode())); }) )) .verifyComplete() ); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null)) 
.verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment(TextSentiment.MIXED, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEGATIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.POSITIVE, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier .create(client.analyzeSentiment("The hotel was dark and unclean. The restaurant had amazing gnocchi.")) .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Verifies that an TextAnalyticsException is thrown for an empty document. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); StepVerifier.create(client.analyzeSentiment("")) .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException && throwable.getMessage().equals(INVALID_DOCUMENT_EXPECTED_EXCEPTION_MESSAGE)) .verify(); } /** * Test analyzing sentiment for a faulty document. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForFaultyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); final DocumentSentiment expectedDocumentSentiment = new DocumentSentiment( TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0), new IterableStream<>(Arrays.asList( new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)), new SentenceSentiment("", TextSentiment.NEUTRAL, new SentimentConfidenceScores(0.0, 0.0, 0.0)) )), null); StepVerifier.create(client.analyzeSentiment("!@ .assertNext(response -> validateAnalyzedSentiment(expectedDocumentSentiment, response)).verifyComplete(); } /** * Test analyzing sentiment for a duplicate ID list. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentDuplicateIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))); } /** * Verifies that an invalid document exception is returned for input documents with an empty ID. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); textAnalyticsInputEmptyIdRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } /** * Test analyzing sentiment for a list of string input. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentStringInputRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with language code. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeSentimentLanguageHintRunner((inputs, language) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, language, null)) .assertNext(response -> validateSentimentResultCollection(false, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a list of string input with request options. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchStringSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options)) .assertNext(response -> validateSentimentResultCollection(true, getExpectedBatchTextSentiment(), response)) .verifyComplete()); } /** * Test analyzing sentiment for a batch of documents. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null)) .assertNext(response -> validateSentimentResultCollectionWithResponse(false, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verify that we can get statistics on the collection result when given a batch of documents with options. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); analyzeBatchSentimentShowStatsRunner((inputs, options) -> StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options)) .assertNext(response -> validateSentimentResultCollectionWithResponse(true, getExpectedBatchTextSentiment(), 200, response)) .verifyComplete()); } /** * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.textanalytics.TestUtils public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) { client = getTextAnalyticsAsyncClient(httpClient, serviceVersion); tooManyDocumentsRunner(inputs -> StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null)) .verifyErrorSatisfies(ex -> { HttpResponseException httpResponseException = (HttpResponseException) ex; assertEquals(400, httpResponseException.getResponse().getStatusCode()); TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue(); })); } }
```suggestion return cosmosItem; ``` on this if branch the item is InternalObjectNode you don't need to serialze and deserialize again.
public static InternalObjectNode fromObjectToInternalObjectNode(Object cosmosItem) { if (cosmosItem instanceof InternalObjectNode) { return new InternalObjectNode(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new InternalObjectNode((byte[]) cosmosItem); } else { try { return new InternalObjectNode(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } }
return new InternalObjectNode(((InternalObjectNode) cosmosItem).toJson());
public static InternalObjectNode fromObjectToInternalObjectNode(Object cosmosItem) { if (cosmosItem instanceof InternalObjectNode) { return (InternalObjectNode) cosmosItem; } else if (cosmosItem instanceof byte[]) { return new InternalObjectNode((byte[]) cosmosItem); } else { try { return new InternalObjectNode(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } }
class InternalObjectNode extends Resource { private static final ObjectMapper MAPPER = Utils.getSimpleObjectMapper(); /** * Initialize an empty InternalObjectNode object. */ public InternalObjectNode() { } /** * Initialize a InternalObjectNode object from json string. * * @param bytes the json string that represents the document object. */ public InternalObjectNode(byte[] bytes) { super(bytes); } /** * Initialize a InternalObjectNode object from json string. * * @param byteBuffer the json string that represents the document object. */ public InternalObjectNode(ByteBuffer byteBuffer) { super(byteBuffer); } /** * Sets the id * * @param id the name of the resource. * @return the cosmos item properties with id set */ public InternalObjectNode setId(String id) { super.setId(id); return this; } /** * Initialize a InternalObjectNode object from json string. * * @param jsonString the json string that represents the document object. */ public InternalObjectNode(String jsonString) { super(jsonString); } public InternalObjectNode(ObjectNode propertyBag) { super(propertyBag); } /** * fromObjectToInternalObjectNode returns InternalObjectNode */ /** * fromObject returns Document for compatibility with V2 sdk */ public static Document fromObject(Object cosmosItem) { Document typedItem; if (cosmosItem instanceof InternalObjectNode) { typedItem = new Document(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new Document((byte[]) cosmosItem); } else { try { return new Document(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } return typedItem; } public static ByteBuffer serializeJsonToByteBuffer(Object cosmosItem, ObjectMapper objectMapper) { if (cosmosItem instanceof InternalObjectNode) { return ((InternalObjectNode) cosmosItem).serializeJsonToByteBuffer(); } else if (cosmosItem instanceof Document) { return 
ModelBridgeInternal.serializeJsonToByteBuffer((Document) cosmosItem); } else if (cosmosItem instanceof byte[]) { return ByteBuffer.wrap((byte[]) cosmosItem); } else { return Utils.serializeJsonToByteBuffer(objectMapper, cosmosItem); } } static <T> List<T> getTypedResultsFromV2Results(List<Document> results, Class<T> klass) { return results.stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)) .collect(Collectors.toList()); } /** * Gets object. * * @param <T> the type parameter * @param klass the klass * @return the object * @throws IOException the io exception */ public <T> T getObject(Class<T> klass) throws IOException { return MAPPER.readValue(this.toJson(), klass); } }
class InternalObjectNode extends Resource { private static final ObjectMapper MAPPER = Utils.getSimpleObjectMapper(); /** * Initialize an empty InternalObjectNode object. */ public InternalObjectNode() { } /** * Initialize a InternalObjectNode object from json string. * * @param bytes the json string that represents the document object. */ public InternalObjectNode(byte[] bytes) { super(bytes); } /** * Initialize a InternalObjectNode object from json string. * * @param byteBuffer the json string that represents the document object. */ public InternalObjectNode(ByteBuffer byteBuffer) { super(byteBuffer); } /** * Sets the id * * @param id the name of the resource. * @return the cosmos item properties with id set */ public InternalObjectNode setId(String id) { super.setId(id); return this; } /** * Initialize a InternalObjectNode object from json string. * * @param jsonString the json string that represents the document object. */ public InternalObjectNode(String jsonString) { super(jsonString); } public InternalObjectNode(ObjectNode propertyBag) { super(propertyBag); } /** * fromObjectToInternalObjectNode returns InternalObjectNode */ /** * fromObject returns Document for compatibility with V2 sdk */ public static Document fromObject(Object cosmosItem) { Document typedItem; if (cosmosItem instanceof InternalObjectNode) { typedItem = new Document(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new Document((byte[]) cosmosItem); } else { try { return new Document(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } return typedItem; } public static ByteBuffer serializeJsonToByteBuffer(Object cosmosItem, ObjectMapper objectMapper) { if (cosmosItem instanceof InternalObjectNode) { return ((InternalObjectNode) cosmosItem).serializeJsonToByteBuffer(); } else if (cosmosItem instanceof Document) { return 
ModelBridgeInternal.serializeJsonToByteBuffer((Document) cosmosItem); } else if (cosmosItem instanceof byte[]) { return ByteBuffer.wrap((byte[]) cosmosItem); } else { return Utils.serializeJsonToByteBuffer(objectMapper, cosmosItem); } } static <T> List<T> getTypedResultsFromV2Results(List<Document> results, Class<T> klass) { return results.stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)) .collect(Collectors.toList()); } /** * Gets object. * * @param <T> the type parameter * @param klass the klass * @return the object * @throws IOException the io exception */ public <T> T getObject(Class<T> klass) throws IOException { return MAPPER.readValue(this.toJson(), klass); } }
Good catch, fixed.
public static InternalObjectNode fromObjectToInternalObjectNode(Object cosmosItem) { if (cosmosItem instanceof InternalObjectNode) { return new InternalObjectNode(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new InternalObjectNode((byte[]) cosmosItem); } else { try { return new InternalObjectNode(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } }
return new InternalObjectNode(((InternalObjectNode) cosmosItem).toJson());
public static InternalObjectNode fromObjectToInternalObjectNode(Object cosmosItem) { if (cosmosItem instanceof InternalObjectNode) { return (InternalObjectNode) cosmosItem; } else if (cosmosItem instanceof byte[]) { return new InternalObjectNode((byte[]) cosmosItem); } else { try { return new InternalObjectNode(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } }
class InternalObjectNode extends Resource { private static final ObjectMapper MAPPER = Utils.getSimpleObjectMapper(); /** * Initialize an empty InternalObjectNode object. */ public InternalObjectNode() { } /** * Initialize a InternalObjectNode object from json string. * * @param bytes the json string that represents the document object. */ public InternalObjectNode(byte[] bytes) { super(bytes); } /** * Initialize a InternalObjectNode object from json string. * * @param byteBuffer the json string that represents the document object. */ public InternalObjectNode(ByteBuffer byteBuffer) { super(byteBuffer); } /** * Sets the id * * @param id the name of the resource. * @return the cosmos item properties with id set */ public InternalObjectNode setId(String id) { super.setId(id); return this; } /** * Initialize a InternalObjectNode object from json string. * * @param jsonString the json string that represents the document object. */ public InternalObjectNode(String jsonString) { super(jsonString); } public InternalObjectNode(ObjectNode propertyBag) { super(propertyBag); } /** * fromObjectToInternalObjectNode returns InternalObjectNode */ /** * fromObject returns Document for compatibility with V2 sdk */ public static Document fromObject(Object cosmosItem) { Document typedItem; if (cosmosItem instanceof InternalObjectNode) { typedItem = new Document(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new Document((byte[]) cosmosItem); } else { try { return new Document(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } return typedItem; } public static ByteBuffer serializeJsonToByteBuffer(Object cosmosItem, ObjectMapper objectMapper) { if (cosmosItem instanceof InternalObjectNode) { return ((InternalObjectNode) cosmosItem).serializeJsonToByteBuffer(); } else if (cosmosItem instanceof Document) { return 
ModelBridgeInternal.serializeJsonToByteBuffer((Document) cosmosItem); } else if (cosmosItem instanceof byte[]) { return ByteBuffer.wrap((byte[]) cosmosItem); } else { return Utils.serializeJsonToByteBuffer(objectMapper, cosmosItem); } } static <T> List<T> getTypedResultsFromV2Results(List<Document> results, Class<T> klass) { return results.stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)) .collect(Collectors.toList()); } /** * Gets object. * * @param <T> the type parameter * @param klass the klass * @return the object * @throws IOException the io exception */ public <T> T getObject(Class<T> klass) throws IOException { return MAPPER.readValue(this.toJson(), klass); } }
class InternalObjectNode extends Resource { private static final ObjectMapper MAPPER = Utils.getSimpleObjectMapper(); /** * Initialize an empty InternalObjectNode object. */ public InternalObjectNode() { } /** * Initialize a InternalObjectNode object from json string. * * @param bytes the json string that represents the document object. */ public InternalObjectNode(byte[] bytes) { super(bytes); } /** * Initialize a InternalObjectNode object from json string. * * @param byteBuffer the json string that represents the document object. */ public InternalObjectNode(ByteBuffer byteBuffer) { super(byteBuffer); } /** * Sets the id * * @param id the name of the resource. * @return the cosmos item properties with id set */ public InternalObjectNode setId(String id) { super.setId(id); return this; } /** * Initialize a InternalObjectNode object from json string. * * @param jsonString the json string that represents the document object. */ public InternalObjectNode(String jsonString) { super(jsonString); } public InternalObjectNode(ObjectNode propertyBag) { super(propertyBag); } /** * fromObjectToInternalObjectNode returns InternalObjectNode */ /** * fromObject returns Document for compatibility with V2 sdk */ public static Document fromObject(Object cosmosItem) { Document typedItem; if (cosmosItem instanceof InternalObjectNode) { typedItem = new Document(((InternalObjectNode) cosmosItem).toJson()); } else if (cosmosItem instanceof byte[]) { return new Document((byte[]) cosmosItem); } else { try { return new Document(InternalObjectNode.MAPPER.writeValueAsString(cosmosItem)); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } return typedItem; } public static ByteBuffer serializeJsonToByteBuffer(Object cosmosItem, ObjectMapper objectMapper) { if (cosmosItem instanceof InternalObjectNode) { return ((InternalObjectNode) cosmosItem).serializeJsonToByteBuffer(); } else if (cosmosItem instanceof Document) { return 
ModelBridgeInternal.serializeJsonToByteBuffer((Document) cosmosItem); } else if (cosmosItem instanceof byte[]) { return ByteBuffer.wrap((byte[]) cosmosItem); } else { return Utils.serializeJsonToByteBuffer(objectMapper, cosmosItem); } } static <T> List<T> getTypedResultsFromV2Results(List<Document> results, Class<T> klass) { return results.stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)) .collect(Collectors.toList()); } /** * Gets object. * * @param <T> the type parameter * @param klass the klass * @return the object * @throws IOException the io exception */ public <T> T getObject(Class<T> klass) throws IOException { return MAPPER.readValue(this.toJson(), klass); } }
To keep same code style, please use `information.getPartitionKeyPath()` directly instead of define `partitionKeyPath`.
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final String partitionKeyPath = information.getPartitionKeyPath(); final CosmosContainerResponse response = cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), partitionKeyPath); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
new CosmosContainerProperties(information.getContainerName(), partitionKeyPath);
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final CosmosContainerResponse response = cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient .getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable)) .doOnNext(cosmosContainerResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)); }) .block(); assert response != null; return response.getProperties(); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
To keep same code style, please use `information.getPartitionKeyPath()` directly instead of define `partitionKeyPath`.
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { final String partitionKeyPath = information.getPartitionKeyPath(); return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), partitionKeyPath); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable)); }); }
new CosmosContainerProperties(information.getContainerName(), partitionKeyPath);
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) { return cosmosAsyncClient .createDatabaseIfNotExists(this.databaseName) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable)) .flatMap(cosmosDatabaseResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosDatabaseResponse.getDiagnostics(), null); final CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(information.getContainerName(), information.getPartitionKeyPath()); cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive()); cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy()); CosmosAsyncDatabase database = cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId()); Mono<CosmosContainerResponse> cosmosContainerResponseMono; if (information.getRequestUnit() == null) { cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties); } else { ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(information.getRequestUnit()); cosmosContainerResponseMono = database.createContainerIfNotExists(cosmosContainerProperties, throughputProperties); } return cosmosContainerResponseMono .map(cosmosContainerResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null); return cosmosContainerResponse; }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable)); }); }
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, 
"CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing CosmosContainerResponse */ @Override /** * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { 
Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = String.format("select * from root where root.id = '%s'", CosmosUtils.getStringIDValue(id)); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); 
options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(query, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> toDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param 
objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(objectToSave); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName); Mono<CosmosItemResponse<JsonNode>> item; if (partitionKey == null) { item = cosmosAsyncContainer.createItem(originalItem, options); } else { item = cosmosAsyncContainer.createItem(originalItem, partitionKey, options); } return item.publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) 
{ return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable)); } /** * Deletes the item with id and partition key. 
* * @param containerName Container name of database * @param id item id * @param partitionKey the partition key */ @Override public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) { return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions()); } private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey, CosmosItemRequestOptions cosmosItemRequestOptions) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); String idToDelete = CosmosUtils.getStringIDValue(id); if (partitionKey == null) { partitionKey = PartitionKey.NONE; } return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions) .publishOn(Schedulers.parallel()) .doOnNext(cosmosItemResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable)) .then(); } /** * Deletes the entity * * @param <T> type class of domain type * @param containerName Container name of database * @param entity the entity to delete * @return void Mono */ public <T> Mono<Void> deleteEntity(String containerName, T entity) { Assert.notNull(entity, "entity to be deleted should not be null"); @SuppressWarnings("unchecked") final Class<T> domainType = (Class<T>) entity.getClass(); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(entity.getClass(), originalItem, options); return deleteItem(originalItem, containerName, domainType).then(); } /** * Delete all items in a container * * @param containerName the container name * @param domainType the domainType * @return void Mono */ @Override public Mono<Void> deleteAll(@NonNull String containerName, 
@NonNull Class<?> domainType) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return this.delete(query, domainType, containerName).then(); } /** * Delete items matching query * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono */ @Override public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) { Assert.notNull(query, "DocumentQuery should not be null."); Assert.notNull(domainType, "domainType should not be null."); Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final Flux<JsonNode> results = findItems(query, containerName); return results.flatMap(d -> deleteItem(d, containerName, domainType)); } /** * Find items * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Flux with found items or error */ @Override public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) { return findItems(query, containerName) .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties)); } /** * Exists * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ @Override public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) { return count(query, containerName).flatMap(count -> Mono.just(count > 0)); } /** * Exists * * @param id the id * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) { return findById(containerName, id, domainType) .flatMap(o -> Mono.just(o != null)); } /** * Count * * @param 
containerName the container name * @return Mono with the count or error */ @Override public Mono<Long> count(String containerName) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return count(query, containerName); } /** * Count * * @param query the document query * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(CosmosQuery query, String containerName) { return getCountValue(query, containerName); } @Override public MappingCosmosConverter getConverter() { return mappingCosmosConverter; } private Mono<Long> getCountValue(CosmosQuery query, String containerName) { final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return executeQuery(querySpec, containerName, options) .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, feedResponse.getCosmosDiagnostics(), feedResponse)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable)) .next() .map(r -> r.getResults().get(0).asLong()); } private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName, CosmosQueryRequestOptions options) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable)); } /** * Delete container with container name * * @param containerName the container name */ @Override public void deleteContainer(@NonNull String containerName) { Assert.hasText(containerName, "containerName should have text."); cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .delete() .doOnNext(cosmosContainerResponse -> 
CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable)) .block(); } /** * @param domainType the domain class * @return the container name */ public String getContainerName(Class<?> domainType) { Assert.notNull(domainType, "domainType should not be null"); return CosmosEntityInformation.getInstance(domainType).getContainerName(); } private JsonNode prepareToPersistAndConvertToItemProperties(Object object) { if (cosmosAuditingHandler != null) { cosmosAuditingHandler.markAudited(object); } return mappingCosmosConverter.writeJsonNode(object); } private Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName) { final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable)); } private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName, @NonNull Class<T> domainType) { final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(domainType, jsonNode, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(jsonNode, options) 
.publishOn(Schedulers.parallel()) .map(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return cosmosItemResponse; }) .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode))) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable)); } private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) { return mappingCosmosConverter.read(domainType, jsonNode); } private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); if (entityInformation.isVersioned()) { options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText()); } } }
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, 
"CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing CosmosContainerResponse */ @Override /** * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { 
Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = String.format("select * from root where root.id = '%s'", CosmosUtils.getStringIDValue(id)); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); 
options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(query, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> toDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param 
objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(objectToSave); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .createItem(originalItem, partitionKey, options) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) { return insert(containerName, objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = 
CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable)); } /** * Deletes the item with id and partition key. 
* * @param containerName Container name of database * @param id item id * @param partitionKey the partition key */ @Override public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) { return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions()); } private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey, CosmosItemRequestOptions cosmosItemRequestOptions) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); String idToDelete = CosmosUtils.getStringIDValue(id); if (partitionKey == null) { partitionKey = PartitionKey.NONE; } return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions) .publishOn(Schedulers.parallel()) .doOnNext(cosmosItemResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable)) .then(); } /** * Deletes the entity * * @param <T> type class of domain type * @param containerName Container name of database * @param entity the entity to delete * @return void Mono */ public <T> Mono<Void> deleteEntity(String containerName, T entity) { Assert.notNull(entity, "entity to be deleted should not be null"); @SuppressWarnings("unchecked") final Class<T> domainType = (Class<T>) entity.getClass(); final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(entity.getClass(), originalItem, options); return deleteItem(originalItem, containerName, domainType).then(); } /** * Delete all items in a container * * @param containerName the container name * @param domainType the domainType * @return void Mono */ @Override public Mono<Void> deleteAll(@NonNull String containerName, 
@NonNull Class<?> domainType) { Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return this.delete(query, domainType, containerName).then(); } /** * Delete items matching query * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono */ @Override public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) { Assert.notNull(query, "DocumentQuery should not be null."); Assert.notNull(domainType, "domainType should not be null."); Assert.hasText(containerName, "container name should not be null, empty or only whitespaces"); final Flux<JsonNode> results = findItems(query, containerName); return results.flatMap(d -> deleteItem(d, containerName, domainType)); } /** * Find items * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Flux with found items or error */ @Override public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) { return findItems(query, containerName) .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties)); } /** * Exists * * @param query the document query * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ @Override public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) { return count(query, containerName).flatMap(count -> Mono.just(count > 0)); } /** * Exists * * @param id the id * @param domainType the entity class * @param containerName the container name * @return Mono with a boolean or error */ public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) { return findById(containerName, id, domainType) .flatMap(o -> Mono.just(o != null)); } /** * Count * * @param 
containerName the container name * @return Mono with the count or error */ @Override public Mono<Long> count(String containerName) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return count(query, containerName); } /** * Count * * @param query the document query * @param containerName the container name * @return Mono with count or error */ @Override public Mono<Long> count(CosmosQuery query, String containerName) { return getCountValue(query, containerName); } @Override public MappingCosmosConverter getConverter() { return mappingCosmosConverter; } private Mono<Long> getCountValue(CosmosQuery query, String containerName) { final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setQueryMetricsEnabled(this.queryMetricsEnabled); return executeQuery(querySpec, containerName, options) .doOnNext(feedResponse -> CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, feedResponse.getCosmosDiagnostics(), feedResponse)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable)) .next() .map(r -> r.getResults().get(0).asLong()); } private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec, String containerName, CosmosQueryRequestOptions options) { return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, options, JsonNode.class) .byPage() .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable)); } /** * Delete container with container name * * @param containerName the container name */ @Override public void deleteContainer(@NonNull String containerName) { Assert.hasText(containerName, "containerName should have text."); cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .delete() .doOnNext(cosmosContainerResponse -> 
CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosContainerResponse.getDiagnostics(), null)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable)) .block(); } /** * @param domainType the domain class * @return the container name */ public String getContainerName(Class<?> domainType) { Assert.notNull(domainType, "domainType should not be null"); return CosmosEntityInformation.getInstance(domainType).getContainerName(); } private JsonNode prepareToPersistAndConvertToItemProperties(Object object) { if (cosmosAuditingHandler != null) { cosmosAuditingHandler.markAudited(object); } return mappingCosmosConverter.writeJsonNode(object); } private Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName) { final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable)); } private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName, @NonNull Class<T> domainType) { final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(domainType, jsonNode, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .deleteItem(jsonNode, options) 
.publishOn(Schedulers.parallel()) .map(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return cosmosItemResponse; }) .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode))) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable)); } private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) { return mappingCosmosConverter.read(domainType, jsonNode); } private void applyVersioning(Class<?> domainType, JsonNode jsonNode, CosmosItemRequestOptions options) { CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType); if (entityInformation.isVersioned()) { options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText()); } } }
Makes sense, updated.
/**
 * Creates the database (if absent) and then the container described by
 * {@code information} (if absent), blocking until both exist.
 *
 * @param information entity metadata supplying container name, partition key path,
 *                    time-to-live, indexing policy and optional provisioned throughput
 * @return the properties of the existing or newly created container
 * @throws IllegalStateException if the reactive pipeline completes without a response
 */
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) {
    final String partitionKeyPath = information.getPartitionKeyPath();
    final CosmosContainerResponse response = cosmosAsyncClient
        .createDatabaseIfNotExists(this.databaseName)
        .publishOn(Schedulers.parallel())
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable))
        .flatMap(cosmosDatabaseResponse -> {
            // Record diagnostics for the database call before issuing the container call.
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosDatabaseResponse.getDiagnostics(), null);
            final CosmosContainerProperties cosmosContainerProperties =
                new CosmosContainerProperties(information.getContainerName(), partitionKeyPath);
            cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
            cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy());
            CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient
                .getDatabase(cosmosDatabaseResponse.getProperties().getId());
            Mono<CosmosContainerResponse> cosmosContainerResponseMono;
            if (information.getRequestUnit() == null) {
                cosmosContainerResponseMono =
                    cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties);
            } else {
                // Only provision manual throughput when the entity declares request units.
                ThroughputProperties throughputProperties =
                    ThroughputProperties.createManualThroughput(information.getRequestUnit());
                cosmosContainerResponseMono =
                    cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties,
                        throughputProperties);
            }
            return cosmosContainerResponseMono
                .onErrorResume(throwable ->
                    CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable))
                .doOnNext(cosmosContainerResponse ->
                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        cosmosContainerResponse.getDiagnostics(), null));
        })
        .block();
    // `assert` is a no-op unless the JVM runs with -ea; fail loudly here instead of
    // risking a context-free NPE on response.getProperties() below.
    if (response == null) {
        throw new IllegalStateException(
            "Failed to create container " + information.getContainerName());
    }
    return response.getProperties();
}
new CosmosContainerProperties(information.getContainerName(), partitionKeyPath);
/**
 * Creates the database (if absent) and then the container described by
 * {@code information} (if absent), blocking until both exist.
 *
 * @param information entity metadata supplying container name, partition key path,
 *                    time-to-live, indexing policy and optional provisioned throughput
 * @return the properties of the existing or newly created container
 * @throws IllegalStateException if the reactive pipeline completes without a response
 */
public CosmosContainerProperties createContainerIfNotExists(CosmosEntityInformation<?, ?> information) {
    final CosmosContainerResponse response = cosmosAsyncClient
        .createDatabaseIfNotExists(this.databaseName)
        .publishOn(Schedulers.parallel())
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable))
        .flatMap(cosmosDatabaseResponse -> {
            // Record diagnostics for the database call before issuing the container call.
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosDatabaseResponse.getDiagnostics(), null);
            final CosmosContainerProperties cosmosContainerProperties =
                new CosmosContainerProperties(information.getContainerName(),
                    information.getPartitionKeyPath());
            cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
            cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy());
            CosmosAsyncDatabase cosmosAsyncDatabase = cosmosAsyncClient
                .getDatabase(cosmosDatabaseResponse.getProperties().getId());
            Mono<CosmosContainerResponse> cosmosContainerResponseMono;
            if (information.getRequestUnit() == null) {
                cosmosContainerResponseMono =
                    cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties);
            } else {
                // Only provision manual throughput when the entity declares request units.
                ThroughputProperties throughputProperties =
                    ThroughputProperties.createManualThroughput(information.getRequestUnit());
                cosmosContainerResponseMono =
                    cosmosAsyncDatabase.createContainerIfNotExists(cosmosContainerProperties,
                        throughputProperties);
            }
            return cosmosContainerResponseMono
                .onErrorResume(throwable ->
                    CosmosExceptionUtils.exceptionHandler("Failed to create container", throwable))
                .doOnNext(cosmosContainerResponse ->
                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        cosmosContainerResponse.getDiagnostics(), null));
        })
        .block();
    // `assert` is a no-op unless the JVM runs with -ea; fail loudly here instead of
    // risking a context-free NPE on response.getProperties() below.
    if (response == null) {
        throw new IllegalStateException(
            "Failed to create container " + information.getContainerName());
    }
    return response.getProperties();
}
/**
 * Finds all items in the given container.
 *
 * @param containerName name of the container to query; must be non-blank
 * @param domainType class type of domain
 * @return found results in a List
 */
public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) {
    Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
    Assert.notNull(domainType, "domainType should not be null");
    // An ALL-criteria query matches every item in the container.
    final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
    return findItems(query, containerName, domainType);
}
class type of domain * @return found results in a List */ public <T> Iterable<T> findAll(String containerName, final Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return findItems(query, containerName, domainType); }
Makes sense, updated.
/**
 * Ensures that both the database and the container described by {@code information}
 * exist, creating either on demand, without blocking.
 *
 * @param information entity metadata carrying container name, partition key path,
 *                    time-to-live, indexing policy and optional provisioned throughput
 * @return a {@link Mono} emitting the container creation/lookup response
 */
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) {
    // Resolve the partition key path eagerly, before the pipeline is assembled.
    final String keyPath = information.getPartitionKeyPath();
    return cosmosAsyncClient
        .createDatabaseIfNotExists(this.databaseName)
        .publishOn(Schedulers.parallel())
        .onErrorResume(error ->
            CosmosExceptionUtils.exceptionHandler("Failed to create database", error))
        .flatMap(dbResponse -> {
            // Capture diagnostics of the database call before creating the container.
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                dbResponse.getDiagnostics(), null);
            final CosmosContainerProperties containerProperties =
                new CosmosContainerProperties(information.getContainerName(), keyPath);
            containerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
            containerProperties.setIndexingPolicy(information.getIndexingPolicy());
            CosmosAsyncDatabase database =
                cosmosAsyncClient.getDatabase(dbResponse.getProperties().getId());
            // Provision manual throughput only when the entity declares request units.
            final Mono<CosmosContainerResponse> containerMono =
                information.getRequestUnit() == null
                    ? database.createContainerIfNotExists(containerProperties)
                    : database.createContainerIfNotExists(containerProperties,
                        ThroughputProperties.createManualThroughput(information.getRequestUnit()));
            return containerMono
                .map(containerResponse -> {
                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        containerResponse.getDiagnostics(), null);
                    return containerResponse;
                })
                .onErrorResume(error ->
                    CosmosExceptionUtils.exceptionHandler("Failed to create container", error));
        });
}
new CosmosContainerProperties(information.getContainerName(), partitionKeyPath);
/**
 * Ensures that both the database and the container described by {@code information}
 * exist, creating either on demand, without blocking.
 *
 * @param information entity metadata carrying container name, partition key path,
 *                    time-to-live, indexing policy and optional provisioned throughput
 * @return a {@link Mono} emitting the container creation/lookup response
 */
public Mono<CosmosContainerResponse> createContainerIfNotExists(CosmosEntityInformation<?, ?> information) {
    return cosmosAsyncClient
        .createDatabaseIfNotExists(this.databaseName)
        .publishOn(Schedulers.parallel())
        .onErrorResume(err ->
            CosmosExceptionUtils.exceptionHandler("Failed to create database", err))
        .flatMap(databaseResponse -> {
            // Capture diagnostics of the database call before creating the container.
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                databaseResponse.getDiagnostics(), null);
            final CosmosContainerProperties props = new CosmosContainerProperties(
                information.getContainerName(), information.getPartitionKeyPath());
            props.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
            props.setIndexingPolicy(information.getIndexingPolicy());
            CosmosAsyncDatabase asyncDatabase =
                cosmosAsyncClient.getDatabase(databaseResponse.getProperties().getId());
            // Provision manual throughput only when the entity declares request units.
            final Mono<CosmosContainerResponse> creation =
                information.getRequestUnit() == null
                    ? asyncDatabase.createContainerIfNotExists(props)
                    : asyncDatabase.createContainerIfNotExists(props,
                        ThroughputProperties.createManualThroughput(information.getRequestUnit()));
            return creation
                .map(containerResponse -> {
                    CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                        containerResponse.getDiagnostics(), null);
                    return containerResponse;
                })
                .onErrorResume(err ->
                    CosmosExceptionUtils.exceptionHandler("Failed to create container", err));
        });
}
/**
 * Template providing reactive CRUD and query operations against Azure Cosmos DB.
 *
 * <p>Fixes applied in this revision:
 * <ul>
 *   <li>Restored the {@code createContainerIfNotExists} body that was missing between its
 *       Javadoc/{@code @Override} and {@code findAll}, which left a dangling duplicate
 *       {@code @Override} annotation (a compile error).</li>
 *   <li>{@code insert(String containerName, T objectToSave)} previously ignored its
 *       {@code containerName} argument and re-derived the name from the entity class;
 *       it now honors the caller-supplied container name.</li>
 * </ul>
 */
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware {

    private final MappingCosmosConverter mappingCosmosConverter;
    private final String databaseName;
    private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor;
    private final boolean queryMetricsEnabled;
    private final CosmosAsyncClient cosmosAsyncClient;
    // May be null; auditing is skipped when no handler is configured.
    private final IsNewAwareAuditingHandler cosmosAuditingHandler;

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     * @param cosmosAuditingHandler can be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter,
            cosmosAuditingHandler);
    }

    /**
     * Initialization
     *
     * @param client must not be {@literal null}
     * @param databaseName must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter) {
        this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * Constructor
     *
     * @param cosmosFactory the cosmos db factory
     * @param cosmosConfig the cosmos config
     * @param mappingCosmosConverter the mappingCosmosConverter
     * @param cosmosAuditingHandler the auditing handler
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter,
                                  IsNewAwareAuditingHandler cosmosAuditingHandler) {
        Assert.notNull(cosmosFactory, "CosmosFactory must not be null!");
        Assert.notNull(cosmosConfig, "CosmosConfig must not be null!");
        Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!");
        this.mappingCosmosConverter = mappingCosmosConverter;
        this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient();
        this.databaseName = cosmosFactory.getDatabaseName();
        this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor();
        this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled();
        this.cosmosAuditingHandler = cosmosAuditingHandler;
    }

    /**
     * Initialization
     *
     * @param cosmosFactory must not be {@literal null}
     * @param cosmosConfig must not be {@literal null}
     * @param mappingCosmosConverter must not be {@literal null}
     */
    public ReactiveCosmosTemplate(CosmosFactory cosmosFactory,
                                  CosmosConfig cosmosConfig,
                                  MappingCosmosConverter mappingCosmosConverter) {
        this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null);
    }

    /**
     * @param applicationContext the application context
     * @throws BeansException the bean exception
     */
    public void setApplicationContext(@NonNull ApplicationContext applicationContext)
        throws BeansException {
        // Intentionally empty: the application context is not needed by this template.
    }

    /**
     * Creates a container if it doesn't already exist
     *
     * @param information the CosmosEntityInformation
     * @return Mono containing CosmosContainerResponse
     */
    @Override
    public Mono<CosmosContainerResponse> createContainerIfNotExists(
        CosmosEntityInformation<?, ?> information) {
        return cosmosAsyncClient
            .createDatabaseIfNotExists(this.databaseName)
            .publishOn(Schedulers.parallel())
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to create database", throwable))
            .flatMap(cosmosDatabaseResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosDatabaseResponse.getDiagnostics(), null);
                final CosmosContainerProperties cosmosContainerProperties =
                    new CosmosContainerProperties(information.getContainerName(),
                        information.getPartitionKeyPath());
                cosmosContainerProperties.setDefaultTimeToLiveInSeconds(information.getTimeToLive());
                cosmosContainerProperties.setIndexingPolicy(information.getIndexingPolicy());

                CosmosAsyncDatabase database =
                    cosmosAsyncClient.getDatabase(cosmosDatabaseResponse.getProperties().getId());
                Mono<CosmosContainerResponse> cosmosContainerResponseMono;
                if (information.getRequestUnit() == null) {
                    cosmosContainerResponseMono =
                        database.createContainerIfNotExists(cosmosContainerProperties);
                } else {
                    ThroughputProperties throughputProperties =
                        ThroughputProperties.createManualThroughput(information.getRequestUnit());
                    cosmosContainerResponseMono =
                        database.createContainerIfNotExists(cosmosContainerProperties,
                            throughputProperties);
                }
                return cosmosContainerResponseMono
                    .map(cosmosContainerResponse -> {
                        CosmosUtils.fillAndProcessResponseDiagnostics(
                            this.responseDiagnosticsProcessor,
                            cosmosContainerResponse.getDiagnostics(), null);
                        return cosmosContainerResponse;
                    })
                    .onErrorResume(throwable ->
                        CosmosExceptionUtils.exceptionHandler("Failed to create container",
                            throwable));
            });
    }

    /**
     * Find all items in a given container
     *
     * @param containerName the containerName
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(String containerName, Class<T> domainType) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return find(query, domainType, containerName);
    }

    /**
     * Find all items in a given container
     *
     * @param domainType the domainType
     * @return Flux with all the found items or error
     */
    @Override
    public <T> Flux<T> findAll(Class<T> domainType) {
        return findAll(domainType.getSimpleName(), domainType);
    }

    @Override
    public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) {
        Assert.notNull(partitionKey, "partitionKey should not be null");
        Assert.notNull(domainType, "domainType should not be null");

        final String containerName = getContainerName(domainType);

        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setPartitionKey(partitionKey);
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable));
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the domainType
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return findById(getContainerName(domainType), id, domainType);
    }

    /**
     * Find by id
     *
     * @param containerName the container name
     * @param id the id
     * @param domainType the entity class
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(domainType, "domainType should not be null");
        final String query = String.format("select * from root where root.id = '%s'",
            CosmosUtils.getStringIDValue(id));
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(query, options, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Mono.justOrEmpty(cosmosItemFeedResponse
                    .getResults()
                    .stream()
                    .map(cosmosItem -> toDomainObject(domainType, cosmosItem))
                    .findFirst());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable))
            .next();
    }

    /**
     * Find by id
     *
     * @param id the id
     * @param domainType the entity class
     * @param partitionKey partition Key
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) {
        Assert.notNull(domainType, "domainType should not be null");
        String idToFind = CosmosUtils.getStringIDValue(id);

        final String containerName = getContainerName(domainType);
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .readItem(idToFind, partitionKey, JsonNode.class)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.justOrEmpty(toDomainObject(domainType, cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable));
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey);
    }

    /**
     * Insert
     *
     * @param objectToSave the object to save
     * @param <T> type of inserted objectToSave
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(T objectToSave) {
        return insert(getContainerName(objectToSave.getClass()), objectToSave, null);
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @param partitionKey the partition key
     * @return Mono with the item or error
     */
    public <T> Mono<T> insert(String containerName, Object objectToSave,
                              PartitionKey partitionKey) {
        Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces");
        Assert.notNull(objectToSave, "objectToSave should not be null");

        final Class<T> domainType = (Class<T>) objectToSave.getClass();
        generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType);
        final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(objectToSave);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName);
        Mono<CosmosItemResponse<JsonNode>> item;
        // A null PartitionKey must not be forwarded to the SDK overload that requires one.
        if (partitionKey == null) {
            item = cosmosAsyncContainer.createItem(originalItem, options);
        } else {
            item = cosmosAsyncContainer.createItem(originalItem, partitionKey, options);
        }
        return item.publishOn(Schedulers.parallel())
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable))
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            });
    }

    /**
     * Insert
     *
     * @param <T> type of inserted objectToSave
     * @param containerName the container name
     * @param objectToSave the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> insert(String containerName, T objectToSave) {
        // BUGFIX: previously delegated with getContainerName(objectToSave.getClass()),
        // silently discarding the caller-supplied container name.
        return insert(containerName, objectToSave, null);
    }

    @SuppressWarnings("unchecked")
    private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) {
        CosmosEntityInformation<?, ?> entityInfo = CosmosEntityInformation.getInstance(type);
        if (entityInfo.shouldGenerateId()
            && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) {
            ReflectionUtils.setField(entityInfo.getIdField(), originalItem,
                UUID.randomUUID().toString());
        }
    }

    /**
     * Upsert
     *
     * @param object the object to upsert
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(T object) {
        return upsert(getContainerName(object.getClass()), object);
    }

    /**
     * Upsert
     *
     * @param containerName the container name
     * @param object the object to save
     * @return Mono with the item or error
     */
    @Override
    public <T> Mono<T> upsert(String containerName, T object) {
        final Class<T> domainType = (Class<T>) object.getClass();
        final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(object);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        // Optimistic concurrency: sets the if-match ETag when the entity is @Version-ed.
        applyVersioning(object.getClass(), originalItem, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .upsertItem(originalItem, options)
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem()));
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable));
    }

    /**
     * Deletes the item with id and partition key.
     *
     * @param containerName Container name of database
     * @param id item id
     * @param partitionKey the partition key
     */
    @Override
    public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) {
        return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions());
    }

    private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey,
                                  CosmosItemRequestOptions cosmosItemRequestOptions) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
        String idToDelete = CosmosUtils.getStringIDValue(id);

        if (partitionKey == null) {
            partitionKey = PartitionKey.NONE;
        }

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions)
            .publishOn(Schedulers.parallel())
            .doOnNext(cosmosItemResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable))
            .then();
    }

    /**
     * Deletes the entity
     *
     * @param <T> type class of domain type
     * @param containerName Container name of database
     * @param entity the entity to delete
     * @return void Mono
     */
    public <T> Mono<Void> deleteEntity(String containerName, T entity) {
        Assert.notNull(entity, "entity to be deleted should not be null");
        @SuppressWarnings("unchecked")
        final Class<T> domainType = (Class<T>) entity.getClass();
        final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity);
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(entity.getClass(), originalItem, options);
        return deleteItem(originalItem, containerName, domainType).then();
    }

    /**
     * Delete all items in a container
     *
     * @param containerName the container name
     * @param domainType the domainType
     * @return void Mono
     */
    @Override
    public Mono<Void> deleteAll(@NonNull String containerName, @NonNull Class<?> domainType) {
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");

        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));

        return this.delete(query, domainType, containerName).then();
    }

    /**
     * Delete items matching query
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono
     */
    @Override
    public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) {
        Assert.notNull(query, "DocumentQuery should not be null.");
        Assert.notNull(domainType, "domainType should not be null.");
        Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");

        final Flux<JsonNode> results = findItems(query, containerName);

        return results.flatMap(d -> deleteItem(d, containerName, domainType));
    }

    /**
     * Find items
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Flux with found items or error
     */
    @Override
    public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) {
        return findItems(query, containerName)
            .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties));
    }

    /**
     * Exists
     *
     * @param query the document query
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    @Override
    public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) {
        return count(query, containerName).flatMap(count -> Mono.just(count > 0));
    }

    /**
     * Exists
     *
     * @param id the id
     * @param domainType the entity class
     * @param containerName the container name
     * @return Mono with a boolean or error
     */
    public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) {
        return findById(containerName, id, domainType)
            .flatMap(o -> Mono.just(o != null));
    }

    /**
     * Count
     *
     * @param containerName the container name
     * @return Mono with the count or error
     */
    @Override
    public Mono<Long> count(String containerName) {
        final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
        return count(query, containerName);
    }

    /**
     * Count
     *
     * @param query the document query
     * @param containerName the container name
     * @return Mono with count or error
     */
    @Override
    public Mono<Long> count(CosmosQuery query, String containerName) {
        return getCountValue(query, containerName);
    }

    @Override
    public MappingCosmosConverter getConverter() {
        return mappingCosmosConverter;
    }

    private Mono<Long> getCountValue(CosmosQuery query, String containerName) {
        final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query);
        final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return executeQuery(querySpec, containerName, options)
            .doOnNext(feedResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    feedResponse.getCosmosDiagnostics(), feedResponse))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable))
            .next()
            // COUNT queries return a single scalar in the first result slot.
            .map(r -> r.getResults().get(0).asLong());
    }

    private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec,
                                                      String containerName,
                                                      CosmosQueryRequestOptions options) {
        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, options, JsonNode.class)
            .byPage()
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable));
    }

    /**
     * Delete container with container name
     *
     * @param containerName the container name
     */
    @Override
    public void deleteContainer(@NonNull String containerName) {
        Assert.hasText(containerName, "containerName should have text.");
        cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .delete()
            .doOnNext(cosmosContainerResponse ->
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosContainerResponse.getDiagnostics(), null))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable))
            .block();
    }

    /**
     * @param domainType the domain class
     * @return the container name
     */
    public String getContainerName(Class<?> domainType) {
        Assert.notNull(domainType, "domainType should not be null");
        return CosmosEntityInformation.getInstance(domainType).getContainerName();
    }

    // Applies auditing (when configured) before converting the entity to its JSON form.
    private JsonNode prepareToPersistAndConvertToItemProperties(Object object) {
        if (cosmosAuditingHandler != null) {
            cosmosAuditingHandler.markAudited(object);
        }
        return mappingCosmosConverter.writeJsonNode(object);
    }

    private Flux<JsonNode> findItems(@NonNull CosmosQuery query, @NonNull String containerName) {
        final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query);
        final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);

        return cosmosAsyncClient
            .getDatabase(this.databaseName)
            .getContainer(containerName)
            .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class)
            .byPage()
            .publishOn(Schedulers.parallel())
            .flatMap(cosmosItemFeedResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse);
                return Flux.fromIterable(cosmosItemFeedResponse.getResults());
            })
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable));
    }

    private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode, String containerName,
                                   @NonNull Class<T> domainType) {
        final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
        applyVersioning(domainType, jsonNode, options);

        return cosmosAsyncClient.getDatabase(this.databaseName)
            .getContainer(containerName)
            .deleteItem(jsonNode, options)
            .publishOn(Schedulers.parallel())
            .map(cosmosItemResponse -> {
                CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                    cosmosItemResponse.getDiagnostics(), null);
                return cosmosItemResponse;
            })
            .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode)))
            .onErrorResume(throwable ->
                CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable));
    }

    private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) {
        return mappingCosmosConverter.read(domainType, jsonNode);
    }

    private void applyVersioning(Class<?> domainType, JsonNode jsonNode,
                                 CosmosItemRequestOptions options) {
        CosmosEntityInformation<?, ?> entityInformation =
            CosmosEntityInformation.getInstance(domainType);
        if (entityInformation.isVersioned()) {
            options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText());
        }
    }
}
class ReactiveCosmosTemplate implements ReactiveCosmosOperations, ApplicationContextAware { private final MappingCosmosConverter mappingCosmosConverter; private final String databaseName; private final ResponseDiagnosticsProcessor responseDiagnosticsProcessor; private final boolean queryMetricsEnabled; private final CosmosAsyncClient cosmosAsyncClient; private final IsNewAwareAuditingHandler cosmosAuditingHandler; /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} * @param cosmosAuditingHandler can be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, cosmosAuditingHandler); } /** * Initialization * * @param client must not be {@literal null} * @param databaseName must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosAsyncClient client, String databaseName, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(new CosmosFactory(client, databaseName), cosmosConfig, mappingCosmosConverter, null); } /** * Constructor * * @param cosmosFactory the cosmos db factory * @param cosmosConfig the cosmos config * @param mappingCosmosConverter the mappingCosmosConverter * @param cosmosAuditingHandler the auditing handler */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter, IsNewAwareAuditingHandler cosmosAuditingHandler) { Assert.notNull(cosmosFactory, "CosmosFactory must not be null!"); Assert.notNull(cosmosConfig, 
"CosmosConfig must not be null!"); Assert.notNull(mappingCosmosConverter, "MappingCosmosConverter must not be null!"); this.mappingCosmosConverter = mappingCosmosConverter; this.cosmosAsyncClient = cosmosFactory.getCosmosAsyncClient(); this.databaseName = cosmosFactory.getDatabaseName(); this.responseDiagnosticsProcessor = cosmosConfig.getResponseDiagnosticsProcessor(); this.queryMetricsEnabled = cosmosConfig.isQueryMetricsEnabled(); this.cosmosAuditingHandler = cosmosAuditingHandler; } /** * Initialization * * @param cosmosFactory must not be {@literal null} * @param cosmosConfig must not be {@literal null} * @param mappingCosmosConverter must not be {@literal null} */ public ReactiveCosmosTemplate(CosmosFactory cosmosFactory, CosmosConfig cosmosConfig, MappingCosmosConverter mappingCosmosConverter) { this(cosmosFactory, cosmosConfig, mappingCosmosConverter, null); } /** * @param applicationContext the application context * @throws BeansException the bean exception */ public void setApplicationContext(@NonNull ApplicationContext applicationContext) throws BeansException { } /** * Creates a container if it doesn't already exist * * @param information the CosmosEntityInformation * @return Mono containing CosmosContainerResponse */ @Override /** * Find all items in a given container * * @param containerName the containerName * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(String containerName, Class<T> domainType) { final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL)); return find(query, domainType, containerName); } /** * Find all items in a given container * * @param domainType the domainType * @return Flux with all the found items or error */ @Override public <T> Flux<T> findAll(Class<T> domainType) { return findAll(domainType.getSimpleName(), domainType); } @Override public <T> Flux<T> findAll(PartitionKey partitionKey, Class<T> domainType) { 
Assert.notNull(partitionKey, "partitionKey should not be null"); Assert.notNull(domainType, "domainType should not be null"); final String containerName = getContainerName(domainType); final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .queryItems("SELECT * FROM r", cosmosQueryRequestOptions, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Flux.fromIterable(cosmosItemFeedResponse.getResults()); }) .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties)) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to find items", throwable)); } /** * Find by id * * @param id the id * @param domainType the domainType * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType) { Assert.notNull(domainType, "domainType should not be null"); return findById(getContainerName(domainType), id, domainType); } /** * Find by id * * @param containerName the container name * @param id the id * @param domainType the entity class * @return Mono with the item or error */ @Override public <T> Mono<T> findById(String containerName, Object id, Class<T> domainType) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(domainType, "domainType should not be null"); final String query = String.format("select * from root where root.id = '%s'", CosmosUtils.getStringIDValue(id)); final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); 
options.setQueryMetricsEnabled(this.queryMetricsEnabled); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .queryItems(query, options, JsonNode.class) .byPage() .publishOn(Schedulers.parallel()) .flatMap(cosmosItemFeedResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemFeedResponse.getCosmosDiagnostics(), cosmosItemFeedResponse); return Mono.justOrEmpty(cosmosItemFeedResponse .getResults() .stream() .map(cosmosItem -> toDomainObject(domainType, cosmosItem)) .findFirst()); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)) .next(); } /** * Find by id * * @param id the id * @param domainType the entity class * @param partitionKey partition Key * @return Mono with the item or error */ @Override public <T> Mono<T> findById(Object id, Class<T> domainType, PartitionKey partitionKey) { Assert.notNull(domainType, "domainType should not be null"); String idToFind = CosmosUtils.getStringIDValue(id); final String containerName = getContainerName(domainType); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .readItem(idToFind, partitionKey, JsonNode.class) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.justOrEmpty(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.findAPIExceptionHandler("Failed to find item", throwable)); } /** * Insert * * @param <T> type of inserted objectToSave * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave, PartitionKey partitionKey) { return insert(getContainerName(objectToSave.getClass()), objectToSave, partitionKey); } /** * Insert * * @param 
objectToSave the object to save * @param <T> type of inserted objectToSave * @return Mono with the item or error */ public <T> Mono<T> insert(T objectToSave) { return insert(getContainerName(objectToSave.getClass()), objectToSave, null); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @param partitionKey the partition key * @return Mono with the item or error */ public <T> Mono<T> insert(String containerName, Object objectToSave, PartitionKey partitionKey) { Assert.hasText(containerName, "containerName should not be null, empty or only whitespaces"); Assert.notNull(objectToSave, "objectToSave should not be null"); final Class<T> domainType = (Class<T>) objectToSave.getClass(); generateIdIfNullAndAutoGenerationEnabled(objectToSave, domainType); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(objectToSave); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); return cosmosAsyncClient .getDatabase(this.databaseName) .getContainer(containerName) .createItem(originalItem, partitionKey, options) .publishOn(Schedulers.parallel()) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to insert item", throwable)) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }); } /** * Insert * * @param <T> type of inserted objectToSave * @param containerName the container name * @param objectToSave the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> insert(String containerName, T objectToSave) { return insert(containerName, objectToSave, null); } @SuppressWarnings("unchecked") private <T> void generateIdIfNullAndAutoGenerationEnabled(T originalItem, Class<?> type) { CosmosEntityInformation<?, ?> entityInfo = 
CosmosEntityInformation.getInstance(type); if (entityInfo.shouldGenerateId() && ReflectionUtils.getField(entityInfo.getIdField(), originalItem) == null) { ReflectionUtils.setField(entityInfo.getIdField(), originalItem, UUID.randomUUID().toString()); } } /** * Upsert * * @param object the object to upsert * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(T object) { return upsert(getContainerName(object.getClass()), object); } /** * Upsert * * @param containerName the container name * @param object the object to save * @return Mono with the item or error */ @Override public <T> Mono<T> upsert(String containerName, T object) { final Class<T> domainType = (Class<T>) object.getClass(); final JsonNode originalItem = prepareToPersistAndConvertToItemProperties(object); final CosmosItemRequestOptions options = new CosmosItemRequestOptions(); applyVersioning(object.getClass(), originalItem, options); return cosmosAsyncClient.getDatabase(this.databaseName) .getContainer(containerName) .upsertItem(originalItem, options) .publishOn(Schedulers.parallel()) .flatMap(cosmosItemResponse -> { CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor, cosmosItemResponse.getDiagnostics(), null); return Mono.just(toDomainObject(domainType, cosmosItemResponse.getItem())); }) .onErrorResume(throwable -> CosmosExceptionUtils.exceptionHandler("Failed to upsert item", throwable)); } /** * Deletes the item with id and partition key. 
 *
 * @param containerName Container name of database
 * @param id item id
 * @param partitionKey the partition key
 */
@Override
public Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey) {
    // Delegate to the private overload with default item request options.
    return deleteById(containerName, id, partitionKey, new CosmosItemRequestOptions());
}

// Core delete-by-id implementation shared by the public overloads.
private Mono<Void> deleteById(String containerName, Object id, PartitionKey partitionKey,
                              CosmosItemRequestOptions cosmosItemRequestOptions) {
    Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
    String idToDelete = CosmosUtils.getStringIDValue(id);
    if (partitionKey == null) {
        // Items stored without a partition key are addressed via PartitionKey.NONE.
        partitionKey = PartitionKey.NONE;
    }
    return cosmosAsyncClient.getDatabase(this.databaseName)
        .getContainer(containerName)
        .deleteItem(idToDelete, partitionKey, cosmosItemRequestOptions)
        .publishOn(Schedulers.parallel())
        .doOnNext(cosmosItemResponse ->
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosItemResponse.getDiagnostics(), null))
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable))
        .then();
}

/**
 * Deletes the entity
 *
 * @param <T> type class of domain type
 * @param containerName Container name of database
 * @param entity the entity to delete
 * @return void Mono
 */
public <T> Mono<Void> deleteEntity(String containerName, T entity) {
    Assert.notNull(entity, "entity to be deleted should not be null");
    @SuppressWarnings("unchecked")
    final Class<T> domainType = (Class<T>) entity.getClass();
    final JsonNode originalItem = mappingCosmosConverter.writeJsonNode(entity);
    final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    // If the entity is versioned, attach its etag so the delete is optimistic-concurrency checked.
    applyVersioning(entity.getClass(), originalItem, options);
    return deleteItem(originalItem, containerName, domainType).then();
}

/**
 * Delete all items in a container
 *
 * @param containerName the container name
 * @param domainType the domainType
 * @return void Mono
 */
@Override
public Mono<Void> deleteAll(@NonNull String containerName,
                            @NonNull Class<?> domainType) {
    Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
    // "Delete all" is implemented as "delete everything matching the ALL query".
    final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
    return this.delete(query, domainType, containerName).then();
}

/**
 * Delete items matching query
 *
 * @param query the document query
 * @param domainType the entity class
 * @param containerName the container name
 * @return Flux of the deleted items, mapped back to the domain type
 */
@Override
public <T> Flux<T> delete(CosmosQuery query, Class<T> domainType, String containerName) {
    Assert.notNull(query, "DocumentQuery should not be null.");
    Assert.notNull(domainType, "domainType should not be null.");
    Assert.hasText(containerName, "container name should not be null, empty or only whitespaces");
    // First find all matching raw items, then delete each one individually.
    final Flux<JsonNode> results = findItems(query, containerName);
    return results.flatMap(d -> deleteItem(d, containerName, domainType));
}

/**
 * Find items
 *
 * @param query the document query
 * @param domainType the entity class
 * @param containerName the container name
 * @return Flux with found items or error
 */
@Override
public <T> Flux<T> find(CosmosQuery query, Class<T> domainType, String containerName) {
    return findItems(query, containerName)
        .map(cosmosItemProperties -> toDomainObject(domainType, cosmosItemProperties));
}

/**
 * Exists
 *
 * @param query the document query
 * @param domainType the entity class (NOTE: not used here — existence is determined purely by count > 0)
 * @param containerName the container name
 * @return Mono with a boolean or error
 */
@Override
public Mono<Boolean> exists(CosmosQuery query, Class<?> domainType, String containerName) {
    return count(query, containerName).flatMap(count -> Mono.just(count > 0));
}

/**
 * Exists
 *
 * @param id the id
 * @param domainType the entity class
 * @param containerName the container name
 * @return Mono with a boolean or error; empty findById results yield an empty Mono
 */
public Mono<Boolean> existsById(Object id, Class<?> domainType, String containerName) {
    return findById(containerName, id, domainType)
        .flatMap(o -> Mono.just(o != null));
}

/**
 * Count
 *
 * @param containerName the container name
 * @return Mono with the count or error
 */
@Override
public Mono<Long> count(String containerName) {
    // Count everything by running the ALL-criteria count query.
    final CosmosQuery query = new CosmosQuery(Criteria.getInstance(CriteriaType.ALL));
    return count(query, containerName);
}

/**
 * Count
 *
 * @param query the document query
 * @param containerName the container name
 * @return Mono with count or error
 */
@Override
public Mono<Long> count(CosmosQuery query, String containerName) {
    return getCountValue(query, containerName);
}

@Override
public MappingCosmosConverter getConverter() {
    return mappingCosmosConverter;
}

// Executes a COUNT query and extracts the single numeric result from the first page.
private Mono<Long> getCountValue(CosmosQuery query, String containerName) {
    final SqlQuerySpec querySpec = new CountQueryGenerator().generateCosmos(query);
    final CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setQueryMetricsEnabled(this.queryMetricsEnabled);
    return executeQuery(querySpec, containerName, options)
        .doOnNext(feedResponse ->
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                feedResponse.getCosmosDiagnostics(), feedResponse))
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to get count value", throwable))
        .next()
        .map(r -> r.getResults().get(0).asLong());
}

// Runs a SQL query against the given container and returns the paged responses.
private Flux<FeedResponse<JsonNode>> executeQuery(SqlQuerySpec sqlQuerySpec,
                                                  String containerName,
                                                  CosmosQueryRequestOptions options) {
    return cosmosAsyncClient.getDatabase(this.databaseName)
        .getContainer(containerName)
        .queryItems(sqlQuerySpec, options, JsonNode.class)
        .byPage()
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to execute query", throwable));
}

/**
 * Delete container with container name
 *
 * @param containerName the container name
 */
@Override
public void deleteContainer(@NonNull String containerName) {
    Assert.hasText(containerName, "containerName should have text.");
    // NOTE: block() makes this a synchronous call — it waits for the container deletion to complete.
    cosmosAsyncClient.getDatabase(this.databaseName)
        .getContainer(containerName)
        .delete()
        .doOnNext(cosmosContainerResponse ->
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosContainerResponse.getDiagnostics(), null))
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to delete container", throwable))
        .block();
}

/**
 * @param domainType the domain class
 * @return the container name
 */
public String getContainerName(Class<?> domainType) {
    Assert.notNull(domainType, "domainType should not be null");
    return CosmosEntityInformation.getInstance(domainType).getContainerName();
}

// Applies auditing (if configured) and converts the entity to its JSON representation.
private JsonNode prepareToPersistAndConvertToItemProperties(Object object) {
    if (cosmosAuditingHandler != null) {
        cosmosAuditingHandler.markAudited(object);
    }
    return mappingCosmosConverter.writeJsonNode(object);
}

// Translates the CosmosQuery into SQL, runs it, and flattens the paged responses into items.
private Flux<JsonNode> findItems(@NonNull CosmosQuery query,
                                 @NonNull String containerName) {
    final SqlQuerySpec sqlQuerySpec = new FindQuerySpecGenerator().generateCosmos(query);
    final CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    cosmosQueryRequestOptions.setQueryMetricsEnabled(this.queryMetricsEnabled);
    return cosmosAsyncClient
        .getDatabase(this.databaseName)
        .getContainer(containerName)
        .queryItems(sqlQuerySpec, cosmosQueryRequestOptions, JsonNode.class)
        .byPage()
        .publishOn(Schedulers.parallel())
        .flatMap(cosmosItemFeedResponse -> {
            // Record diagnostics per page before emitting the page's items.
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosItemFeedResponse.getCosmosDiagnostics(),
                cosmosItemFeedResponse);
            return Flux.fromIterable(cosmosItemFeedResponse.getResults());
        })
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to query items", throwable));
}

// Deletes a single raw item (etag-checked when versioned) and returns it mapped to the domain type.
private <T> Mono<T> deleteItem(@NonNull JsonNode jsonNode,
                               String containerName,
                               @NonNull Class<T> domainType) {
    final CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    applyVersioning(domainType, jsonNode, options);
    return cosmosAsyncClient.getDatabase(this.databaseName)
        .getContainer(containerName)
        .deleteItem(jsonNode, options)
        .publishOn(Schedulers.parallel())
        .map(cosmosItemResponse -> {
            CosmosUtils.fillAndProcessResponseDiagnostics(this.responseDiagnosticsProcessor,
                cosmosItemResponse.getDiagnostics(), null);
            return cosmosItemResponse;
        })
        .flatMap(objectCosmosItemResponse -> Mono.just(toDomainObject(domainType, jsonNode)))
        .onErrorResume(throwable ->
            CosmosExceptionUtils.exceptionHandler("Failed to delete item", throwable));
}

// Converts a raw JSON item into the requested domain type.
private <T> T toDomainObject(@NonNull Class<T> domainType, JsonNode jsonNode) {
    return mappingCosmosConverter.read(domainType, jsonNode);
}

// For versioned entities, copies the item's etag into the request options so the
// service rejects the operation if the item changed since it was read.
private void applyVersioning(Class<?> domainType,
                             JsonNode jsonNode,
                             CosmosItemRequestOptions options) {
    CosmosEntityInformation<?, ?> entityInformation = CosmosEntityInformation.getInstance(domainType);
    if (entityInformation.isVersioned()) {
        options.setIfMatchETag(jsonNode.get(Constants.ETAG_PROPERTY_DEFAULT_NAME).asText());
    }
}
}
How about setting it on the variable declaration itself?
/**
 * Instantiates a new query request options.
 * Query metrics are enabled by default; callers can opt out via setQueryMetricsEnabled(false).
 */
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
this.queryMetricsEnabled = true; // query metrics are collected by default; disable via setQueryMetricsEnabled(false)
/**
 * Instantiates a new query request options.
 * Query metrics are enabled by default; callers can opt out via setQueryMetricsEnabled(false).
 */
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
class CosmosQueryRequestOptions {
    private String sessionToken;
    private String partitionKeyRangeId;
    private Boolean scanInQueryEnabled;
    private Boolean emitVerboseTracesInQuery;
    private int maxDegreeOfParallelism;
    private int maxBufferedItemCount;
    private int responseContinuationTokenLimitInKb;
    private Integer maxItemCount;
    private String requestContinuation;
    private PartitionKey partitionkey;
    private boolean queryMetricsEnabled;
    private Map<String, Object> properties;
    private boolean emptyPagesAllowed;

    /**
     * Instantiates a new query request options.
     * Query metrics are enabled by default; disable via {@link #setQueryMetricsEnabled(boolean)}.
     */
    public CosmosQueryRequestOptions() {
        this.queryMetricsEnabled = true;
    }

    /**
     * Instantiates a new query request options as a copy of another instance.
     *
     * @param options the options to copy
     */
    CosmosQueryRequestOptions(CosmosQueryRequestOptions options) {
        this.sessionToken = options.sessionToken;
        this.partitionKeyRangeId = options.partitionKeyRangeId;
        this.scanInQueryEnabled = options.scanInQueryEnabled;
        this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery;
        this.maxDegreeOfParallelism = options.maxDegreeOfParallelism;
        this.maxBufferedItemCount = options.maxBufferedItemCount;
        this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb;
        this.maxItemCount = options.maxItemCount;
        this.requestContinuation = options.requestContinuation;
        this.partitionkey = options.partitionkey;
        this.queryMetricsEnabled = options.queryMetricsEnabled;
        this.emptyPagesAllowed = options.emptyPagesAllowed;
        // NOTE(review): 'properties' is not copied here, matching the original behavior — confirm intentional.
    }

    /** Gets the partitionKeyRangeId. */
    String getPartitionKeyRangeIdInternal() {
        return this.partitionKeyRangeId;
    }

    /**
     * Sets the partitionKeyRangeId.
     *
     * @param partitionKeyRangeId the partitionKeyRangeId.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) {
        this.partitionKeyRangeId = partitionKeyRangeId;
        return this;
    }

    /** Gets the session token for use with session consistency. */
    public String getSessionToken() {
        return this.sessionToken;
    }

    /**
     * Sets the session token for use with session consistency.
     *
     * @param sessionToken the session token.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setSessionToken(String sessionToken) {
        this.sessionToken = sessionToken;
        return this;
    }

    /** Gets the option to allow scan on queries that could not be served by the index. */
    public Boolean isScanInQueryEnabled() {
        return this.scanInQueryEnabled;
    }

    /**
     * Sets the option to allow scan on the queries which couldn't be served as
     * indexing was opted out on the requested paths.
     *
     * @param scanInQueryEnabled the option of enable scan in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) {
        this.scanInQueryEnabled = scanInQueryEnabled;
        return this;
    }

    /** Gets the option to allow queries to emit verbose traces for investigation. */
    Boolean isEmitVerboseTracesInQuery() {
        return this.emitVerboseTracesInQuery;
    }

    /**
     * Sets the option to allow queries to emit verbose traces for investigation.
     *
     * @param emitVerboseTracesInQuery the emit verbose traces in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) {
        this.emitVerboseTracesInQuery = emitVerboseTracesInQuery;
        return this;
    }

    /** Gets the number of concurrent client-side operations during parallel query execution. */
    public int getMaxDegreeOfParallelism() {
        return maxDegreeOfParallelism;
    }

    /**
     * Sets the number of concurrent operations run client side during parallel query execution.
     *
     * @param maxDegreeOfParallelism number of concurrent operations.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) {
        this.maxDegreeOfParallelism = maxDegreeOfParallelism;
        return this;
    }

    /** Gets the maximum number of items buffered client side during parallel query execution. */
    public int getMaxBufferedItemCount() {
        return maxBufferedItemCount;
    }

    /**
     * Sets the maximum number of items that can be buffered client side during
     * parallel query execution.
     *
     * @param maxBufferedItemCount maximum number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) {
        this.maxBufferedItemCount = maxBufferedItemCount;
        return this;
    }

    /**
     * Sets the ResponseContinuationTokenLimitInKb request option for item query requests
     * in the Azure Cosmos DB service. Limits the length of the continuation token in the
     * query response; valid values are &gt;= 1. At 1KB the service serializes only the
     * required fields; from 2KB upward it also serializes optional index-lookup state
     * (up to the configured limit), which avoids redoing that work on continuation and
     * can improve query performance.
     *
     * @param limitInKb continuation token size limit.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) {
        this.responseContinuationTokenLimitInKb = limitInKb;
        return this;
    }

    /** Gets the ResponseContinuationTokenLimitInKb request option, or 0 if not set. */
    public int getResponseContinuationTokenLimitInKb() {
        return responseContinuationTokenLimitInKb;
    }

    /** Gets the maximum number of items to be returned in the enumeration operation. */
    Integer getMaxItemCount() {
        return this.maxItemCount;
    }

    /**
     * Sets the maximum number of items to be returned in the enumeration operation.
     *
     * @param maxItemCount the max number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) {
        this.maxItemCount = maxItemCount;
        return this;
    }

    /** Gets the request continuation token. */
    String getRequestContinuation() {
        return this.requestContinuation;
    }

    /**
     * Sets the request continuation token.
     *
     * @param requestContinuation the request continuation.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) {
        this.requestContinuation = requestContinuation;
        return this;
    }

    /** Gets the partition key used to identify the current request's target partition. */
    public PartitionKey getPartitionKey() {
        return this.partitionkey;
    }

    /**
     * Sets the partition key used to identify the current request's target partition.
     *
     * @param partitionkey the partition key value.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) {
        this.partitionkey = partitionkey;
        return this;
    }

    /** Gets whether query metrics are collected for item query requests (default: true). */
    public boolean isQueryMetricsEnabled() {
        return queryMetricsEnabled;
    }

    /**
     * Enables/disables collection of metrics relating to query execution on item query
     * requests. Enabled by default.
     *
     * @param queryMetricsEnabled whether to enable or disable query metrics.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) {
        this.queryMetricsEnabled = queryMetricsEnabled;
        return this;
    }

    /** Gets the request options properties. */
    Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Sets the properties used to identify the request token.
     *
     * @param properties the properties.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setProperties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }

    /** Gets whether empty result pages are allowed in feed responses. */
    boolean isEmptyPagesAllowed() {
        return emptyPagesAllowed;
    }

    /**
     * Sets the option to allow empty result pages in feed response. Defaults to false.
     *
     * @param emptyPagesAllowed whether to allow empty pages in feed response.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) {
        this.emptyPagesAllowed = emptyPagesAllowed;
        return this;
    }
}
class CosmosQueryRequestOptions {
    private String sessionToken;
    private String partitionKeyRangeId;
    private Boolean scanInQueryEnabled;
    private Boolean emitVerboseTracesInQuery;
    private int maxDegreeOfParallelism;
    private int maxBufferedItemCount;
    private int responseContinuationTokenLimitInKb;
    private Integer maxItemCount;
    private String requestContinuation;
    private PartitionKey partitionkey;
    private boolean queryMetricsEnabled;
    private Map<String, Object> properties;
    private boolean emptyPagesAllowed;

    /**
     * Instantiates a new query request options.
     * Query metrics are enabled by default; disable via {@link #setQueryMetricsEnabled(boolean)}.
     */
    public CosmosQueryRequestOptions() {
        this.queryMetricsEnabled = true;
    }

    /**
     * Instantiates a new query request options as a copy of another instance.
     *
     * @param options the options to copy
     */
    CosmosQueryRequestOptions(CosmosQueryRequestOptions options) {
        this.sessionToken = options.sessionToken;
        this.partitionKeyRangeId = options.partitionKeyRangeId;
        this.scanInQueryEnabled = options.scanInQueryEnabled;
        this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery;
        this.maxDegreeOfParallelism = options.maxDegreeOfParallelism;
        this.maxBufferedItemCount = options.maxBufferedItemCount;
        this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb;
        this.maxItemCount = options.maxItemCount;
        this.requestContinuation = options.requestContinuation;
        this.partitionkey = options.partitionkey;
        this.queryMetricsEnabled = options.queryMetricsEnabled;
        this.emptyPagesAllowed = options.emptyPagesAllowed;
        // NOTE(review): 'properties' is not copied here, matching the original behavior — confirm intentional.
    }

    /** Gets the partitionKeyRangeId. */
    String getPartitionKeyRangeIdInternal() {
        return this.partitionKeyRangeId;
    }

    /**
     * Sets the partitionKeyRangeId.
     *
     * @param partitionKeyRangeId the partitionKeyRangeId.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) {
        this.partitionKeyRangeId = partitionKeyRangeId;
        return this;
    }

    /** Gets the session token for use with session consistency. */
    public String getSessionToken() {
        return this.sessionToken;
    }

    /**
     * Sets the session token for use with session consistency.
     *
     * @param sessionToken the session token.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setSessionToken(String sessionToken) {
        this.sessionToken = sessionToken;
        return this;
    }

    /** Gets the option to allow scan on queries that could not be served by the index. */
    public Boolean isScanInQueryEnabled() {
        return this.scanInQueryEnabled;
    }

    /**
     * Sets the option to allow scan on the queries which couldn't be served as
     * indexing was opted out on the requested paths.
     *
     * @param scanInQueryEnabled the option of enable scan in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) {
        this.scanInQueryEnabled = scanInQueryEnabled;
        return this;
    }

    /** Gets the option to allow queries to emit verbose traces for investigation. */
    Boolean isEmitVerboseTracesInQuery() {
        return this.emitVerboseTracesInQuery;
    }

    /**
     * Sets the option to allow queries to emit verbose traces for investigation.
     *
     * @param emitVerboseTracesInQuery the emit verbose traces in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) {
        this.emitVerboseTracesInQuery = emitVerboseTracesInQuery;
        return this;
    }

    /** Gets the number of concurrent client-side operations during parallel query execution. */
    public int getMaxDegreeOfParallelism() {
        return maxDegreeOfParallelism;
    }

    /**
     * Sets the number of concurrent operations run client side during parallel query execution.
     *
     * @param maxDegreeOfParallelism number of concurrent operations.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) {
        this.maxDegreeOfParallelism = maxDegreeOfParallelism;
        return this;
    }

    /** Gets the maximum number of items buffered client side during parallel query execution. */
    public int getMaxBufferedItemCount() {
        return maxBufferedItemCount;
    }

    /**
     * Sets the maximum number of items that can be buffered client side during
     * parallel query execution.
     *
     * @param maxBufferedItemCount maximum number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) {
        this.maxBufferedItemCount = maxBufferedItemCount;
        return this;
    }

    /**
     * Sets the ResponseContinuationTokenLimitInKb request option for item query requests
     * in the Azure Cosmos DB service. Limits the length of the continuation token in the
     * query response; valid values are &gt;= 1. At 1KB the service serializes only the
     * required fields; from 2KB upward it also serializes optional index-lookup state
     * (up to the configured limit), which avoids redoing that work on continuation and
     * can improve query performance.
     *
     * @param limitInKb continuation token size limit.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) {
        this.responseContinuationTokenLimitInKb = limitInKb;
        return this;
    }

    /** Gets the ResponseContinuationTokenLimitInKb request option, or 0 if not set. */
    public int getResponseContinuationTokenLimitInKb() {
        return responseContinuationTokenLimitInKb;
    }

    /** Gets the maximum number of items to be returned in the enumeration operation. */
    Integer getMaxItemCount() {
        return this.maxItemCount;
    }

    /**
     * Sets the maximum number of items to be returned in the enumeration operation.
     *
     * @param maxItemCount the max number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) {
        this.maxItemCount = maxItemCount;
        return this;
    }

    /** Gets the request continuation token. */
    String getRequestContinuation() {
        return this.requestContinuation;
    }

    /**
     * Sets the request continuation token.
     *
     * @param requestContinuation the request continuation.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) {
        this.requestContinuation = requestContinuation;
        return this;
    }

    /** Gets the partition key used to identify the current request's target partition. */
    public PartitionKey getPartitionKey() {
        return this.partitionkey;
    }

    /**
     * Sets the partition key used to identify the current request's target partition.
     *
     * @param partitionkey the partition key value.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) {
        this.partitionkey = partitionkey;
        return this;
    }

    /** Gets whether query metrics are collected for item query requests (default: true). */
    public boolean isQueryMetricsEnabled() {
        return queryMetricsEnabled;
    }

    /**
     * Enables/disables collection of metrics relating to query execution on item query
     * requests. Enabled by default.
     *
     * @param queryMetricsEnabled whether to enable or disable query metrics.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) {
        this.queryMetricsEnabled = queryMetricsEnabled;
        return this;
    }

    /** Gets the request options properties. */
    Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Sets the properties used to identify the request token.
     *
     * @param properties the properties.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setProperties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }

    /** Gets whether empty result pages are allowed in feed responses. */
    boolean isEmptyPagesAllowed() {
        return emptyPagesAllowed;
    }

    /**
     * Sets the option to allow empty result pages in feed response. Defaults to false.
     *
     * @param emptyPagesAllowed whether to allow empty pages in feed response.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) {
        this.emptyPagesAllowed = emptyPagesAllowed;
        return this;
    }
}
It looks like the most common pattern has been to initialize the field in the constructor when one is present — e.g. CosmosDatabaseProperties, CosmosTriggerProperties — so I followed that approach for consistency.
/**
 * Instantiates a new query request options.
 * Query metrics are enabled by default; callers can opt out via setQueryMetricsEnabled(false).
 */
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
this.queryMetricsEnabled = true; // query metrics are collected by default; disable via setQueryMetricsEnabled(false)
/**
 * Instantiates a new query request options.
 * Query metrics are enabled by default; callers can opt out via setQueryMetricsEnabled(false).
 */
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
class CosmosQueryRequestOptions {
    private String sessionToken;
    private String partitionKeyRangeId;
    private Boolean scanInQueryEnabled;
    private Boolean emitVerboseTracesInQuery;
    private int maxDegreeOfParallelism;
    private int maxBufferedItemCount;
    private int responseContinuationTokenLimitInKb;
    private Integer maxItemCount;
    private String requestContinuation;
    private PartitionKey partitionkey;
    private boolean queryMetricsEnabled;
    private Map<String, Object> properties;
    private boolean emptyPagesAllowed;

    /**
     * Instantiates a new query request options.
     * Query metrics are enabled by default; disable via {@link #setQueryMetricsEnabled(boolean)}.
     */
    public CosmosQueryRequestOptions() {
        this.queryMetricsEnabled = true;
    }

    /**
     * Instantiates a new query request options as a copy of another instance.
     *
     * @param options the options to copy
     */
    CosmosQueryRequestOptions(CosmosQueryRequestOptions options) {
        this.sessionToken = options.sessionToken;
        this.partitionKeyRangeId = options.partitionKeyRangeId;
        this.scanInQueryEnabled = options.scanInQueryEnabled;
        this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery;
        this.maxDegreeOfParallelism = options.maxDegreeOfParallelism;
        this.maxBufferedItemCount = options.maxBufferedItemCount;
        this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb;
        this.maxItemCount = options.maxItemCount;
        this.requestContinuation = options.requestContinuation;
        this.partitionkey = options.partitionkey;
        this.queryMetricsEnabled = options.queryMetricsEnabled;
        this.emptyPagesAllowed = options.emptyPagesAllowed;
        // NOTE(review): 'properties' is not copied here, matching the original behavior — confirm intentional.
    }

    /** Gets the partitionKeyRangeId. */
    String getPartitionKeyRangeIdInternal() {
        return this.partitionKeyRangeId;
    }

    /**
     * Sets the partitionKeyRangeId.
     *
     * @param partitionKeyRangeId the partitionKeyRangeId.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) {
        this.partitionKeyRangeId = partitionKeyRangeId;
        return this;
    }

    /** Gets the session token for use with session consistency. */
    public String getSessionToken() {
        return this.sessionToken;
    }

    /**
     * Sets the session token for use with session consistency.
     *
     * @param sessionToken the session token.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setSessionToken(String sessionToken) {
        this.sessionToken = sessionToken;
        return this;
    }

    /** Gets the option to allow scan on queries that could not be served by the index. */
    public Boolean isScanInQueryEnabled() {
        return this.scanInQueryEnabled;
    }

    /**
     * Sets the option to allow scan on the queries which couldn't be served as
     * indexing was opted out on the requested paths.
     *
     * @param scanInQueryEnabled the option of enable scan in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) {
        this.scanInQueryEnabled = scanInQueryEnabled;
        return this;
    }

    /** Gets the option to allow queries to emit verbose traces for investigation. */
    Boolean isEmitVerboseTracesInQuery() {
        return this.emitVerboseTracesInQuery;
    }

    /**
     * Sets the option to allow queries to emit verbose traces for investigation.
     *
     * @param emitVerboseTracesInQuery the emit verbose traces in query.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) {
        this.emitVerboseTracesInQuery = emitVerboseTracesInQuery;
        return this;
    }

    /** Gets the number of concurrent client-side operations during parallel query execution. */
    public int getMaxDegreeOfParallelism() {
        return maxDegreeOfParallelism;
    }

    /**
     * Sets the number of concurrent operations run client side during parallel query execution.
     *
     * @param maxDegreeOfParallelism number of concurrent operations.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) {
        this.maxDegreeOfParallelism = maxDegreeOfParallelism;
        return this;
    }

    /** Gets the maximum number of items buffered client side during parallel query execution. */
    public int getMaxBufferedItemCount() {
        return maxBufferedItemCount;
    }

    /**
     * Sets the maximum number of items that can be buffered client side during
     * parallel query execution.
     *
     * @param maxBufferedItemCount maximum number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) {
        this.maxBufferedItemCount = maxBufferedItemCount;
        return this;
    }

    /**
     * Sets the ResponseContinuationTokenLimitInKb request option for item query requests
     * in the Azure Cosmos DB service. Limits the length of the continuation token in the
     * query response; valid values are &gt;= 1. At 1KB the service serializes only the
     * required fields; from 2KB upward it also serializes optional index-lookup state
     * (up to the configured limit), which avoids redoing that work on continuation and
     * can improve query performance.
     *
     * @param limitInKb continuation token size limit.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) {
        this.responseContinuationTokenLimitInKb = limitInKb;
        return this;
    }

    /** Gets the ResponseContinuationTokenLimitInKb request option, or 0 if not set. */
    public int getResponseContinuationTokenLimitInKb() {
        return responseContinuationTokenLimitInKb;
    }

    /** Gets the maximum number of items to be returned in the enumeration operation. */
    Integer getMaxItemCount() {
        return this.maxItemCount;
    }

    /**
     * Sets the maximum number of items to be returned in the enumeration operation.
     *
     * @param maxItemCount the max number of items.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) {
        this.maxItemCount = maxItemCount;
        return this;
    }

    /** Gets the request continuation token. */
    String getRequestContinuation() {
        return this.requestContinuation;
    }

    /**
     * Sets the request continuation token.
     *
     * @param requestContinuation the request continuation.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) {
        this.requestContinuation = requestContinuation;
        return this;
    }

    /** Gets the partition key used to identify the current request's target partition. */
    public PartitionKey getPartitionKey() {
        return this.partitionkey;
    }

    /**
     * Sets the partition key used to identify the current request's target partition.
     *
     * @param partitionkey the partition key value.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) {
        this.partitionkey = partitionkey;
        return this;
    }

    /** Gets whether query metrics are collected for item query requests (default: true). */
    public boolean isQueryMetricsEnabled() {
        return queryMetricsEnabled;
    }

    /**
     * Enables/disables collection of metrics relating to query execution on item query
     * requests. Enabled by default.
     *
     * @param queryMetricsEnabled whether to enable or disable query metrics.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) {
        this.queryMetricsEnabled = queryMetricsEnabled;
        return this;
    }

    /** Gets the request options properties. */
    Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Sets the properties used to identify the request token.
     *
     * @param properties the properties.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setProperties(Map<String, Object> properties) {
        this.properties = properties;
        return this;
    }

    /** Gets whether empty result pages are allowed in feed responses. */
    boolean isEmptyPagesAllowed() {
        return emptyPagesAllowed;
    }

    /**
     * Sets the option to allow empty result pages in feed response. Defaults to false.
     *
     * @param emptyPagesAllowed whether to allow empty pages in feed response.
     * @return this {@code CosmosQueryRequestOptions}.
     */
    CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) {
        this.emptyPagesAllowed = emptyPagesAllowed;
        return this;
    }
}
class CosmosQueryRequestOptions { private String sessionToken; private String partitionKeyRangeId; private Boolean scanInQueryEnabled; private Boolean emitVerboseTracesInQuery; private int maxDegreeOfParallelism; private int maxBufferedItemCount; private int responseContinuationTokenLimitInKb; private Integer maxItemCount; private String requestContinuation; private PartitionKey partitionkey; private boolean queryMetricsEnabled; private Map<String, Object> properties; private boolean emptyPagesAllowed; /** * Instantiates a new query request options. */ /** * Instantiates a new query request options. * * @param options the options */ CosmosQueryRequestOptions(CosmosQueryRequestOptions options) { this.sessionToken = options.sessionToken; this.partitionKeyRangeId = options.partitionKeyRangeId; this.scanInQueryEnabled = options.scanInQueryEnabled; this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; this.maxBufferedItemCount = options.maxBufferedItemCount; this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; this.maxItemCount = options.maxItemCount; this.requestContinuation = options.requestContinuation; this.partitionkey = options.partitionkey; this.queryMetricsEnabled = options.queryMetricsEnabled; this.emptyPagesAllowed = options.emptyPagesAllowed; } /** * Gets the partitionKeyRangeId. * * @return the partitionKeyRangeId. */ String getPartitionKeyRangeIdInternal() { return this.partitionKeyRangeId; } /** * Sets the partitionKeyRangeId. * * @param partitionKeyRangeId the partitionKeyRangeId. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) { this.partitionKeyRangeId = partitionKeyRangeId; return this; } /** * Gets the session token for use with session consistency. * * @return the session token. 
*/ public String getSessionToken() { return this.sessionToken; } /** * Sets the session token for use with session consistency. * * @param sessionToken the session token. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } /** * Gets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @return the option of enable scan in query. */ public Boolean isScanInQueryEnabled() { return this.scanInQueryEnabled; } /** * Sets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @param scanInQueryEnabled the option of enable scan in query. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) { this.scanInQueryEnabled = scanInQueryEnabled; return this; } /** * Gets the option to allow queries to emit out verbose traces for * investigation. * * @return the emit verbose traces in query. */ Boolean isEmitVerboseTracesInQuery() { return this.emitVerboseTracesInQuery; } /** * Sets the option to allow queries to emit out verbose traces for * investigation. * * @param emitVerboseTracesInQuery the emit verbose traces in query. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; return this; } /** * Gets the number of concurrent operations run client side during parallel * query execution. * * @return number of concurrent operations run client side during parallel query * execution. */ public int getMaxDegreeOfParallelism() { return maxDegreeOfParallelism; } /** * Sets the number of concurrent operations run client side during parallel * query execution. * * @param maxDegreeOfParallelism number of concurrent operations. 
* @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) { this.maxDegreeOfParallelism = maxDegreeOfParallelism; return this; } /** * Gets the maximum number of items that can be buffered client side during * parallel query execution. * * @return maximum number of items that can be buffered client side during * parallel query execution. */ public int getMaxBufferedItemCount() { return maxBufferedItemCount; } /** * Sets the maximum number of items that can be buffered client side during * parallel query execution. * * @param maxBufferedItemCount maximum number of items. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) { this.maxBufferedItemCount = maxBufferedItemCount; return this; } /** * Sets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * <p> * The continuation token contains both required and optional fields. The * required fields are necessary for resuming the execution from where it was * stooped. The optional fields may contain serialized index lookup work that * was done but not yet utilized. This avoids redoing the work again in * subsequent continuations and hence improve the query performance. Setting the * maximum continuation size to 1KB, the Azure Cosmos DB service will only * serialize required fields. Starting from 2KB, the Azure Cosmos DB service * would serialize as much as it could fit till it reaches the maximum specified * size. * * @param limitInKb continuation token size limit. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) { this.responseContinuationTokenLimitInKb = limitInKb; return this; } /** * Gets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. If not already set returns 0. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set */ public int getResponseContinuationTokenLimitInKb() { return responseContinuationTokenLimitInKb; } /** * Gets the maximum number of items to be returned in the enumeration * operation. * * @return the max number of items. */ Integer getMaxItemCount() { return this.maxItemCount; } /** * Sets the maximum number of items to be returned in the enumeration * operation. * * @param maxItemCount the max number of items. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) { this.maxItemCount = maxItemCount; return this; } /** * Gets the request continuation token. * * @return the request continuation. */ String getRequestContinuation() { return this.requestContinuation; } /** * Sets the request continuation token. * * @param requestContinuation the request continuation. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) { this.requestContinuation = requestContinuation; return this; } /** * Gets the partition key used to identify the current request's target * partition. * * @return the partition key. */ public PartitionKey getPartitionKey() { return this.partitionkey; } /** * Sets the partition key used to identify the current request's target * partition. * * @param partitionkey the partition key value. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) { this.partitionkey = partitionkey; return this; } /** * Gets the option to enable populate query metrics. By default query metrics are enabled. * * @return whether to enable populate query metrics (default: true) */ public boolean isQueryMetricsEnabled() { return queryMetricsEnabled; } /** * Sets the option to enable/disable getting metrics relating to query execution on item query requests. * By default query metrics are enabled. * * @param queryMetricsEnabled whether to enable or disable query metrics * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { this.queryMetricsEnabled = queryMetricsEnabled; return this; } /** * Gets the properties * * @return Map of request options properties */ Map<String, Object> getProperties() { return properties; } /** * Sets the properties used to identify the request token. * * @param properties the properties. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setProperties(Map<String, Object> properties) { this.properties = properties; return this; } /** * Gets the option to allow empty result pages in feed response. * * @return whether to enable allow empty pages or not */ boolean isEmptyPagesAllowed() { return emptyPagesAllowed; } /** * Sets the option to allow empty result pages in feed response. Defaults to false * @param emptyPagesAllowed whether to allow empty pages in feed response * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) { this.emptyPagesAllowed = emptyPagesAllowed; return this; } }
Looks like we didn't update the Javadoc in the first place, so it still says the default value is `false`. @FabianMeiswinkel - now would be a good time to update the Javadoc to mention that the default value is `true`.
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
this.queryMetricsEnabled = true;
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
class CosmosQueryRequestOptions { private String sessionToken; private String partitionKeyRangeId; private Boolean scanInQueryEnabled; private Boolean emitVerboseTracesInQuery; private int maxDegreeOfParallelism; private int maxBufferedItemCount; private int responseContinuationTokenLimitInKb; private Integer maxItemCount; private String requestContinuation; private PartitionKey partitionkey; private boolean queryMetricsEnabled; private Map<String, Object> properties; private boolean emptyPagesAllowed; /** * Instantiates a new query request options. */ /** * Instantiates a new query request options. * * @param options the options */ CosmosQueryRequestOptions(CosmosQueryRequestOptions options) { this.sessionToken = options.sessionToken; this.partitionKeyRangeId = options.partitionKeyRangeId; this.scanInQueryEnabled = options.scanInQueryEnabled; this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; this.maxBufferedItemCount = options.maxBufferedItemCount; this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; this.maxItemCount = options.maxItemCount; this.requestContinuation = options.requestContinuation; this.partitionkey = options.partitionkey; this.queryMetricsEnabled = options.queryMetricsEnabled; this.emptyPagesAllowed = options.emptyPagesAllowed; } /** * Gets the partitionKeyRangeId. * * @return the partitionKeyRangeId. */ String getPartitionKeyRangeIdInternal() { return this.partitionKeyRangeId; } /** * Sets the partitionKeyRangeId. * * @param partitionKeyRangeId the partitionKeyRangeId. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) { this.partitionKeyRangeId = partitionKeyRangeId; return this; } /** * Gets the session token for use with session consistency. * * @return the session token. 
*/ public String getSessionToken() { return this.sessionToken; } /** * Sets the session token for use with session consistency. * * @param sessionToken the session token. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } /** * Gets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @return the option of enable scan in query. */ public Boolean isScanInQueryEnabled() { return this.scanInQueryEnabled; } /** * Sets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @param scanInQueryEnabled the option of enable scan in query. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) { this.scanInQueryEnabled = scanInQueryEnabled; return this; } /** * Gets the option to allow queries to emit out verbose traces for * investigation. * * @return the emit verbose traces in query. */ Boolean isEmitVerboseTracesInQuery() { return this.emitVerboseTracesInQuery; } /** * Sets the option to allow queries to emit out verbose traces for * investigation. * * @param emitVerboseTracesInQuery the emit verbose traces in query. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; return this; } /** * Gets the number of concurrent operations run client side during parallel * query execution. * * @return number of concurrent operations run client side during parallel query * execution. */ public int getMaxDegreeOfParallelism() { return maxDegreeOfParallelism; } /** * Sets the number of concurrent operations run client side during parallel * query execution. * * @param maxDegreeOfParallelism number of concurrent operations. 
* @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) { this.maxDegreeOfParallelism = maxDegreeOfParallelism; return this; } /** * Gets the maximum number of items that can be buffered client side during * parallel query execution. * * @return maximum number of items that can be buffered client side during * parallel query execution. */ public int getMaxBufferedItemCount() { return maxBufferedItemCount; } /** * Sets the maximum number of items that can be buffered client side during * parallel query execution. * * @param maxBufferedItemCount maximum number of items. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) { this.maxBufferedItemCount = maxBufferedItemCount; return this; } /** * Sets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * <p> * The continuation token contains both required and optional fields. The * required fields are necessary for resuming the execution from where it was * stooped. The optional fields may contain serialized index lookup work that * was done but not yet utilized. This avoids redoing the work again in * subsequent continuations and hence improve the query performance. Setting the * maximum continuation size to 1KB, the Azure Cosmos DB service will only * serialize required fields. Starting from 2KB, the Azure Cosmos DB service * would serialize as much as it could fit till it reaches the maximum specified * size. * * @param limitInKb continuation token size limit. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) { this.responseContinuationTokenLimitInKb = limitInKb; return this; } /** * Gets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. If not already set returns 0. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set */ public int getResponseContinuationTokenLimitInKb() { return responseContinuationTokenLimitInKb; } /** * Gets the maximum number of items to be returned in the enumeration * operation. * * @return the max number of items. */ Integer getMaxItemCount() { return this.maxItemCount; } /** * Sets the maximum number of items to be returned in the enumeration * operation. * * @param maxItemCount the max number of items. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) { this.maxItemCount = maxItemCount; return this; } /** * Gets the request continuation token. * * @return the request continuation. */ String getRequestContinuation() { return this.requestContinuation; } /** * Sets the request continuation token. * * @param requestContinuation the request continuation. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) { this.requestContinuation = requestContinuation; return this; } /** * Gets the partition key used to identify the current request's target * partition. * * @return the partition key. */ public PartitionKey getPartitionKey() { return this.partitionkey; } /** * Sets the partition key used to identify the current request's target * partition. * * @param partitionkey the partition key value. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) { this.partitionkey = partitionkey; return this; } /** * Gets the option to enable populate query metrics * * @return whether to enable populate query metrics */ public boolean isQueryMetricsEnabled() { return queryMetricsEnabled; } /** * Sets the option to enable/disable getting metrics relating to query execution on item query requests * * @param queryMetricsEnabled whether to enable or disable query metrics * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { this.queryMetricsEnabled = queryMetricsEnabled; return this; } /** * Gets the properties * * @return Map of request options properties */ Map<String, Object> getProperties() { return properties; } /** * Sets the properties used to identify the request token. * * @param properties the properties. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setProperties(Map<String, Object> properties) { this.properties = properties; return this; } /** * Gets the option to allow empty result pages in feed response. * * @return whether to enable allow empty pages or not */ boolean isEmptyPagesAllowed() { return emptyPagesAllowed; } /** * Sets the option to allow empty result pages in feed response. Defaults to false * @param emptyPagesAllowed whether to allow empty pages in feed response * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) { this.emptyPagesAllowed = emptyPagesAllowed; return this; } }
class CosmosQueryRequestOptions { private String sessionToken; private String partitionKeyRangeId; private Boolean scanInQueryEnabled; private Boolean emitVerboseTracesInQuery; private int maxDegreeOfParallelism; private int maxBufferedItemCount; private int responseContinuationTokenLimitInKb; private Integer maxItemCount; private String requestContinuation; private PartitionKey partitionkey; private boolean queryMetricsEnabled; private Map<String, Object> properties; private boolean emptyPagesAllowed; /** * Instantiates a new query request options. */ /** * Instantiates a new query request options. * * @param options the options */ CosmosQueryRequestOptions(CosmosQueryRequestOptions options) { this.sessionToken = options.sessionToken; this.partitionKeyRangeId = options.partitionKeyRangeId; this.scanInQueryEnabled = options.scanInQueryEnabled; this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; this.maxBufferedItemCount = options.maxBufferedItemCount; this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; this.maxItemCount = options.maxItemCount; this.requestContinuation = options.requestContinuation; this.partitionkey = options.partitionkey; this.queryMetricsEnabled = options.queryMetricsEnabled; this.emptyPagesAllowed = options.emptyPagesAllowed; } /** * Gets the partitionKeyRangeId. * * @return the partitionKeyRangeId. */ String getPartitionKeyRangeIdInternal() { return this.partitionKeyRangeId; } /** * Sets the partitionKeyRangeId. * * @param partitionKeyRangeId the partitionKeyRangeId. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) { this.partitionKeyRangeId = partitionKeyRangeId; return this; } /** * Gets the session token for use with session consistency. * * @return the session token. 
*/ public String getSessionToken() { return this.sessionToken; } /** * Sets the session token for use with session consistency. * * @param sessionToken the session token. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } /** * Gets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @return the option of enable scan in query. */ public Boolean isScanInQueryEnabled() { return this.scanInQueryEnabled; } /** * Sets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @param scanInQueryEnabled the option of enable scan in query. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) { this.scanInQueryEnabled = scanInQueryEnabled; return this; } /** * Gets the option to allow queries to emit out verbose traces for * investigation. * * @return the emit verbose traces in query. */ Boolean isEmitVerboseTracesInQuery() { return this.emitVerboseTracesInQuery; } /** * Sets the option to allow queries to emit out verbose traces for * investigation. * * @param emitVerboseTracesInQuery the emit verbose traces in query. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; return this; } /** * Gets the number of concurrent operations run client side during parallel * query execution. * * @return number of concurrent operations run client side during parallel query * execution. */ public int getMaxDegreeOfParallelism() { return maxDegreeOfParallelism; } /** * Sets the number of concurrent operations run client side during parallel * query execution. * * @param maxDegreeOfParallelism number of concurrent operations. 
* @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) { this.maxDegreeOfParallelism = maxDegreeOfParallelism; return this; } /** * Gets the maximum number of items that can be buffered client side during * parallel query execution. * * @return maximum number of items that can be buffered client side during * parallel query execution. */ public int getMaxBufferedItemCount() { return maxBufferedItemCount; } /** * Sets the maximum number of items that can be buffered client side during * parallel query execution. * * @param maxBufferedItemCount maximum number of items. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) { this.maxBufferedItemCount = maxBufferedItemCount; return this; } /** * Sets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * <p> * The continuation token contains both required and optional fields. The * required fields are necessary for resuming the execution from where it was * stooped. The optional fields may contain serialized index lookup work that * was done but not yet utilized. This avoids redoing the work again in * subsequent continuations and hence improve the query performance. Setting the * maximum continuation size to 1KB, the Azure Cosmos DB service will only * serialize required fields. Starting from 2KB, the Azure Cosmos DB service * would serialize as much as it could fit till it reaches the maximum specified * size. * * @param limitInKb continuation token size limit. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) { this.responseContinuationTokenLimitInKb = limitInKb; return this; } /** * Gets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. If not already set returns 0. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set */ public int getResponseContinuationTokenLimitInKb() { return responseContinuationTokenLimitInKb; } /** * Gets the maximum number of items to be returned in the enumeration * operation. * * @return the max number of items. */ Integer getMaxItemCount() { return this.maxItemCount; } /** * Sets the maximum number of items to be returned in the enumeration * operation. * * @param maxItemCount the max number of items. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) { this.maxItemCount = maxItemCount; return this; } /** * Gets the request continuation token. * * @return the request continuation. */ String getRequestContinuation() { return this.requestContinuation; } /** * Sets the request continuation token. * * @param requestContinuation the request continuation. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) { this.requestContinuation = requestContinuation; return this; } /** * Gets the partition key used to identify the current request's target * partition. * * @return the partition key. */ public PartitionKey getPartitionKey() { return this.partitionkey; } /** * Sets the partition key used to identify the current request's target * partition. * * @param partitionkey the partition key value. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) { this.partitionkey = partitionkey; return this; } /** * Gets the option to enable populate query metrics. By default query metrics are enabled. * * @return whether to enable populate query metrics (default: true) */ public boolean isQueryMetricsEnabled() { return queryMetricsEnabled; } /** * Sets the option to enable/disable getting metrics relating to query execution on item query requests. * By default query metrics are enabled. * * @param queryMetricsEnabled whether to enable or disable query metrics * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { this.queryMetricsEnabled = queryMetricsEnabled; return this; } /** * Gets the properties * * @return Map of request options properties */ Map<String, Object> getProperties() { return properties; } /** * Sets the properties used to identify the request token. * * @param properties the properties. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setProperties(Map<String, Object> properties) { this.properties = properties; return this; } /** * Gets the option to allow empty result pages in feed response. * * @return whether to enable allow empty pages or not */ boolean isEmptyPagesAllowed() { return emptyPagesAllowed; } /** * Sets the option to allow empty result pages in feed response. Defaults to false * @param emptyPagesAllowed whether to allow empty pages in feed response * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) { this.emptyPagesAllowed = emptyPagesAllowed; return this; } }
Fixed in the next iteration.
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
this.queryMetricsEnabled = true;
public CosmosQueryRequestOptions() { this.queryMetricsEnabled = true; }
class CosmosQueryRequestOptions { private String sessionToken; private String partitionKeyRangeId; private Boolean scanInQueryEnabled; private Boolean emitVerboseTracesInQuery; private int maxDegreeOfParallelism; private int maxBufferedItemCount; private int responseContinuationTokenLimitInKb; private Integer maxItemCount; private String requestContinuation; private PartitionKey partitionkey; private boolean queryMetricsEnabled; private Map<String, Object> properties; private boolean emptyPagesAllowed; /** * Instantiates a new query request options. */ /** * Instantiates a new query request options. * * @param options the options */ CosmosQueryRequestOptions(CosmosQueryRequestOptions options) { this.sessionToken = options.sessionToken; this.partitionKeyRangeId = options.partitionKeyRangeId; this.scanInQueryEnabled = options.scanInQueryEnabled; this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; this.maxBufferedItemCount = options.maxBufferedItemCount; this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; this.maxItemCount = options.maxItemCount; this.requestContinuation = options.requestContinuation; this.partitionkey = options.partitionkey; this.queryMetricsEnabled = options.queryMetricsEnabled; this.emptyPagesAllowed = options.emptyPagesAllowed; } /** * Gets the partitionKeyRangeId. * * @return the partitionKeyRangeId. */ String getPartitionKeyRangeIdInternal() { return this.partitionKeyRangeId; } /** * Sets the partitionKeyRangeId. * * @param partitionKeyRangeId the partitionKeyRangeId. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) { this.partitionKeyRangeId = partitionKeyRangeId; return this; } /** * Gets the session token for use with session consistency. * * @return the session token. 
*/ public String getSessionToken() { return this.sessionToken; } /** * Sets the session token for use with session consistency. * * @param sessionToken the session token. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } /** * Gets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @return the option of enable scan in query. */ public Boolean isScanInQueryEnabled() { return this.scanInQueryEnabled; } /** * Sets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @param scanInQueryEnabled the option of enable scan in query. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) { this.scanInQueryEnabled = scanInQueryEnabled; return this; } /** * Gets the option to allow queries to emit out verbose traces for * investigation. * * @return the emit verbose traces in query. */ Boolean isEmitVerboseTracesInQuery() { return this.emitVerboseTracesInQuery; } /** * Sets the option to allow queries to emit out verbose traces for * investigation. * * @param emitVerboseTracesInQuery the emit verbose traces in query. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; return this; } /** * Gets the number of concurrent operations run client side during parallel * query execution. * * @return number of concurrent operations run client side during parallel query * execution. */ public int getMaxDegreeOfParallelism() { return maxDegreeOfParallelism; } /** * Sets the number of concurrent operations run client side during parallel * query execution. * * @param maxDegreeOfParallelism number of concurrent operations. 
* @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) { this.maxDegreeOfParallelism = maxDegreeOfParallelism; return this; } /** * Gets the maximum number of items that can be buffered client side during * parallel query execution. * * @return maximum number of items that can be buffered client side during * parallel query execution. */ public int getMaxBufferedItemCount() { return maxBufferedItemCount; } /** * Sets the maximum number of items that can be buffered client side during * parallel query execution. * * @param maxBufferedItemCount maximum number of items. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) { this.maxBufferedItemCount = maxBufferedItemCount; return this; } /** * Sets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * <p> * The continuation token contains both required and optional fields. The * required fields are necessary for resuming the execution from where it was * stooped. The optional fields may contain serialized index lookup work that * was done but not yet utilized. This avoids redoing the work again in * subsequent continuations and hence improve the query performance. Setting the * maximum continuation size to 1KB, the Azure Cosmos DB service will only * serialize required fields. Starting from 2KB, the Azure Cosmos DB service * would serialize as much as it could fit till it reaches the maximum specified * size. * * @param limitInKb continuation token size limit. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) { this.responseContinuationTokenLimitInKb = limitInKb; return this; } /** * Gets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. If not already set returns 0. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set */ public int getResponseContinuationTokenLimitInKb() { return responseContinuationTokenLimitInKb; } /** * Gets the maximum number of items to be returned in the enumeration * operation. * * @return the max number of items. */ Integer getMaxItemCount() { return this.maxItemCount; } /** * Sets the maximum number of items to be returned in the enumeration * operation. * * @param maxItemCount the max number of items. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) { this.maxItemCount = maxItemCount; return this; } /** * Gets the request continuation token. * * @return the request continuation. */ String getRequestContinuation() { return this.requestContinuation; } /** * Sets the request continuation token. * * @param requestContinuation the request continuation. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) { this.requestContinuation = requestContinuation; return this; } /** * Gets the partition key used to identify the current request's target * partition. * * @return the partition key. */ public PartitionKey getPartitionKey() { return this.partitionkey; } /** * Sets the partition key used to identify the current request's target * partition. * * @param partitionkey the partition key value. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) { this.partitionkey = partitionkey; return this; } /** * Gets the option to enable populate query metrics * * @return whether to enable populate query metrics */ public boolean isQueryMetricsEnabled() { return queryMetricsEnabled; } /** * Sets the option to enable/disable getting metrics relating to query execution on item query requests * * @param queryMetricsEnabled whether to enable or disable query metrics * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { this.queryMetricsEnabled = queryMetricsEnabled; return this; } /** * Gets the properties * * @return Map of request options properties */ Map<String, Object> getProperties() { return properties; } /** * Sets the properties used to identify the request token. * * @param properties the properties. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setProperties(Map<String, Object> properties) { this.properties = properties; return this; } /** * Gets the option to allow empty result pages in feed response. * * @return whether to enable allow empty pages or not */ boolean isEmptyPagesAllowed() { return emptyPagesAllowed; } /** * Sets the option to allow empty result pages in feed response. Defaults to false * @param emptyPagesAllowed whether to allow empty pages in feed response * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) { this.emptyPagesAllowed = emptyPagesAllowed; return this; } }
class CosmosQueryRequestOptions { private String sessionToken; private String partitionKeyRangeId; private Boolean scanInQueryEnabled; private Boolean emitVerboseTracesInQuery; private int maxDegreeOfParallelism; private int maxBufferedItemCount; private int responseContinuationTokenLimitInKb; private Integer maxItemCount; private String requestContinuation; private PartitionKey partitionkey; private boolean queryMetricsEnabled; private Map<String, Object> properties; private boolean emptyPagesAllowed; /** * Instantiates a new query request options. */ /** * Instantiates a new query request options. * * @param options the options */ CosmosQueryRequestOptions(CosmosQueryRequestOptions options) { this.sessionToken = options.sessionToken; this.partitionKeyRangeId = options.partitionKeyRangeId; this.scanInQueryEnabled = options.scanInQueryEnabled; this.emitVerboseTracesInQuery = options.emitVerboseTracesInQuery; this.maxDegreeOfParallelism = options.maxDegreeOfParallelism; this.maxBufferedItemCount = options.maxBufferedItemCount; this.responseContinuationTokenLimitInKb = options.responseContinuationTokenLimitInKb; this.maxItemCount = options.maxItemCount; this.requestContinuation = options.requestContinuation; this.partitionkey = options.partitionkey; this.queryMetricsEnabled = options.queryMetricsEnabled; this.emptyPagesAllowed = options.emptyPagesAllowed; } /** * Gets the partitionKeyRangeId. * * @return the partitionKeyRangeId. */ String getPartitionKeyRangeIdInternal() { return this.partitionKeyRangeId; } /** * Sets the partitionKeyRangeId. * * @param partitionKeyRangeId the partitionKeyRangeId. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setPartitionKeyRangeIdInternal(String partitionKeyRangeId) { this.partitionKeyRangeId = partitionKeyRangeId; return this; } /** * Gets the session token for use with session consistency. * * @return the session token. 
*/ public String getSessionToken() { return this.sessionToken; } /** * Sets the session token for use with session consistency. * * @param sessionToken the session token. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setSessionToken(String sessionToken) { this.sessionToken = sessionToken; return this; } /** * Gets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @return the option of enable scan in query. */ public Boolean isScanInQueryEnabled() { return this.scanInQueryEnabled; } /** * Sets the option to allow scan on the queries which couldn't be served as * indexing was opted out on the requested paths. * * @param scanInQueryEnabled the option of enable scan in query. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setScanInQueryEnabled(Boolean scanInQueryEnabled) { this.scanInQueryEnabled = scanInQueryEnabled; return this; } /** * Gets the option to allow queries to emit out verbose traces for * investigation. * * @return the emit verbose traces in query. */ Boolean isEmitVerboseTracesInQuery() { return this.emitVerboseTracesInQuery; } /** * Sets the option to allow queries to emit out verbose traces for * investigation. * * @param emitVerboseTracesInQuery the emit verbose traces in query. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmitVerboseTracesInQuery(Boolean emitVerboseTracesInQuery) { this.emitVerboseTracesInQuery = emitVerboseTracesInQuery; return this; } /** * Gets the number of concurrent operations run client side during parallel * query execution. * * @return number of concurrent operations run client side during parallel query * execution. */ public int getMaxDegreeOfParallelism() { return maxDegreeOfParallelism; } /** * Sets the number of concurrent operations run client side during parallel * query execution. * * @param maxDegreeOfParallelism number of concurrent operations. 
* @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxDegreeOfParallelism(int maxDegreeOfParallelism) { this.maxDegreeOfParallelism = maxDegreeOfParallelism; return this; } /** * Gets the maximum number of items that can be buffered client side during * parallel query execution. * * @return maximum number of items that can be buffered client side during * parallel query execution. */ public int getMaxBufferedItemCount() { return maxBufferedItemCount; } /** * Sets the maximum number of items that can be buffered client side during * parallel query execution. * * @param maxBufferedItemCount maximum number of items. * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setMaxBufferedItemCount(int maxBufferedItemCount) { this.maxBufferedItemCount = maxBufferedItemCount; return this; } /** * Sets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * <p> * The continuation token contains both required and optional fields. The * required fields are necessary for resuming the execution from where it was * stooped. The optional fields may contain serialized index lookup work that * was done but not yet utilized. This avoids redoing the work again in * subsequent continuations and hence improve the query performance. Setting the * maximum continuation size to 1KB, the Azure Cosmos DB service will only * serialize required fields. Starting from 2KB, the Azure Cosmos DB service * would serialize as much as it could fit till it reaches the maximum specified * size. * * @param limitInKb continuation token size limit. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setResponseContinuationTokenLimitInKb(int limitInKb) { this.responseContinuationTokenLimitInKb = limitInKb; return this; } /** * Gets the ResponseContinuationTokenLimitInKb request option for item query * requests in the Azure Cosmos DB service. If not already set returns 0. * <p> * ResponseContinuationTokenLimitInKb is used to limit the length of * continuation token in the query response. Valid values are &gt;= 1. * * @return return set ResponseContinuationTokenLimitInKb, or 0 if not set */ public int getResponseContinuationTokenLimitInKb() { return responseContinuationTokenLimitInKb; } /** * Gets the maximum number of items to be returned in the enumeration * operation. * * @return the max number of items. */ Integer getMaxItemCount() { return this.maxItemCount; } /** * Sets the maximum number of items to be returned in the enumeration * operation. * * @param maxItemCount the max number of items. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setMaxItemCount(Integer maxItemCount) { this.maxItemCount = maxItemCount; return this; } /** * Gets the request continuation token. * * @return the request continuation. */ String getRequestContinuation() { return this.requestContinuation; } /** * Sets the request continuation token. * * @param requestContinuation the request continuation. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setRequestContinuation(String requestContinuation) { this.requestContinuation = requestContinuation; return this; } /** * Gets the partition key used to identify the current request's target * partition. * * @return the partition key. */ public PartitionKey getPartitionKey() { return this.partitionkey; } /** * Sets the partition key used to identify the current request's target * partition. * * @param partitionkey the partition key value. * @return the CosmosQueryRequestOptions. 
*/ public CosmosQueryRequestOptions setPartitionKey(PartitionKey partitionkey) { this.partitionkey = partitionkey; return this; } /** * Gets the option to enable populate query metrics. By default query metrics are enabled. * * @return whether to enable populate query metrics (default: true) */ public boolean isQueryMetricsEnabled() { return queryMetricsEnabled; } /** * Sets the option to enable/disable getting metrics relating to query execution on item query requests. * By default query metrics are enabled. * * @param queryMetricsEnabled whether to enable or disable query metrics * @return the CosmosQueryRequestOptions. */ public CosmosQueryRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { this.queryMetricsEnabled = queryMetricsEnabled; return this; } /** * Gets the properties * * @return Map of request options properties */ Map<String, Object> getProperties() { return properties; } /** * Sets the properties used to identify the request token. * * @param properties the properties. * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setProperties(Map<String, Object> properties) { this.properties = properties; return this; } /** * Gets the option to allow empty result pages in feed response. * * @return whether to enable allow empty pages or not */ boolean isEmptyPagesAllowed() { return emptyPagesAllowed; } /** * Sets the option to allow empty result pages in feed response. Defaults to false * @param emptyPagesAllowed whether to allow empty pages in feed response * @return the CosmosQueryRequestOptions. */ CosmosQueryRequestOptions setEmptyPagesAllowed(boolean emptyPagesAllowed) { this.emptyPagesAllowed = emptyPagesAllowed; return this; } }
Just curious — why is the delay added?
protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); }
.delaySubscription(Duration.ofSeconds(1))
protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder().build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); Random random = new Random(); StringBuilder stringBuilder = new StringBuilder(); for (int i = 0; i < INDEX_NAME_LENGTH; i++) { stringBuilder.append(ALLOWED_INDEX_CHARACTERS.charAt(random.nextInt(ALLOWED_INDEX_CHARACTERS.length()))); } this.indexName = stringBuilder.toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); } @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new 
SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder() .proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))) .build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); this.indexName = new Random().ints(INDEX_NAME_LENGTH, 0, ALLOWED_INDEX_CHARACTERS.length()) .mapToObj(ALLOWED_INDEX_CHARACTERS::charAt) .collect(StringBuilder::new, StringBuilder::append, StringBuilder::append) .toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); } @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, 
SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } }
The indexName generation logic can be optionally replaced with this. ```java this.indexName = random.ints(0, ALLOWED_INDEX_CHARACTERS.length()) .limit(INDEX_NAME_LENGTH) .collect(StringBuilder::new, ((stringBuilder, value) -> stringBuilder.append(ALLOWED_INDEX_CHARACTERS.charAt(value))), StringBuilder::append) .toString(); ```
public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder().build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); Random random = new Random(); StringBuilder stringBuilder = new StringBuilder(); for (int i = 0; i < INDEX_NAME_LENGTH; i++) { stringBuilder.append(ALLOWED_INDEX_CHARACTERS.charAt(random.nextInt(ALLOWED_INDEX_CHARACTERS.length()))); } this.indexName = stringBuilder.toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); }
this.indexName = stringBuilder.toString();
public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder() .proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))) .build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); this.indexName = new Random().ints(INDEX_NAME_LENGTH, 0, ALLOWED_INDEX_CHARACTERS.length()) .mapToObj(ALLOWED_INDEX_CHARACTERS::charAt) .collect(StringBuilder::new, StringBuilder::append, StringBuilder::append) .toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); } }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); } }
Document indexing doesn't always complete instantly, so I don't want to begin testing while it is still running nor do I want to spam the service with a ton of requests.
protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); }
.delaySubscription(Duration.ofSeconds(1))
protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder().build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); Random random = new Random(); StringBuilder stringBuilder = new StringBuilder(); for (int i = 0; i < INDEX_NAME_LENGTH; i++) { stringBuilder.append(ALLOWED_INDEX_CHARACTERS.charAt(random.nextInt(ALLOWED_INDEX_CHARACTERS.length()))); } this.indexName = stringBuilder.toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); } @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new 
SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder() .proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))) .build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); this.indexName = new Random().ints(INDEX_NAME_LENGTH, 0, ALLOWED_INDEX_CHARACTERS.length()) .mapToObj(ALLOWED_INDEX_CHARACTERS::charAt) .collect(StringBuilder::new, StringBuilder::append, StringBuilder::append) .toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); } @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, 
SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } }
Will do a slight variant on this, using `random.ints(long streamSize, int lowerBound, int upperBound)`.
public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder().build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); Random random = new Random(); StringBuilder stringBuilder = new StringBuilder(); for (int i = 0; i < INDEX_NAME_LENGTH; i++) { stringBuilder.append(ALLOWED_INDEX_CHARACTERS.charAt(random.nextInt(ALLOWED_INDEX_CHARACTERS.length()))); } this.indexName = stringBuilder.toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); }
this.indexName = stringBuilder.toString();
public ServiceTest(TOptions options) { super(options); String searchEndpoint = Configuration.getGlobalConfiguration().get("SEARCH_ENDPOINT"); if (CoreUtils.isNullOrEmpty(searchEndpoint)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_ENDPOINT"); System.exit(1); } String searchApiKey = Configuration.getGlobalConfiguration().get("SEARCH_API_KEY"); if (CoreUtils.isNullOrEmpty(searchApiKey)) { System.out.printf(CONFIGURATION_ERROR, "SEARCH_API_KEY"); System.exit(1); } SearchIndexClientBuilder builder = new SearchIndexClientBuilder() .endpoint(searchEndpoint) .credential(new AzureKeyCredential(searchApiKey)) .httpClient(new NettyAsyncHttpClientBuilder() .proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888))) .build()); this.searchIndexAsyncClient = builder.buildAsyncClient(); this.indexName = new Random().ints(INDEX_NAME_LENGTH, 0, ALLOWED_INDEX_CHARACTERS.length()) .mapToObj(ALLOWED_INDEX_CHARACTERS::charAt) .collect(StringBuilder::new, StringBuilder::append, StringBuilder::append) .toString(); this.searchClient = builder.buildClient().getSearchClient(this.indexName); this.searchAsyncClient = this.searchIndexAsyncClient.getSearchAsyncClient(this.indexName); }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); } }
class ServiceTest<TOptions extends PerfStressOptions> extends PerfStressTest<TOptions> { private static final String CONFIGURATION_ERROR = "Configuration %s must be set in either environment variables " + "or system properties.%n"; private static final String ALLOWED_INDEX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz0123456789"; private static final int INDEX_NAME_LENGTH = 24; protected static final String SUGGESTER_NAME = "sg"; protected final SearchClient searchClient; protected final SearchAsyncClient searchAsyncClient; private final SearchIndexAsyncClient searchIndexAsyncClient; private final String indexName; @Override public Mono<Void> globalSetupAsync() { return searchIndexAsyncClient .createIndex(new SearchIndex(indexName, SearchIndexAsyncClient.buildSearchFields(Hotel.class, null)) .setSuggesters(new SearchSuggester(SUGGESTER_NAME, Arrays.asList("Description", "HotelName")))) .then(); } @Override public Mono<Void> globalCleanupAsync() { return searchIndexAsyncClient.deleteIndex(indexName); } protected Mono<Void> populateIndex(int documentCount, String documentSize) { /* * Generate the count of documents using the given size. Then, upload the documents in batches of 100, this * prevents the batch from triggering the services request size limit to fail. Finally, continuously poll the * index for its document count until it is equal to the count passed. */ return Mono.defer(() -> { List<Hotel> hotels = DocumentGenerator.generateHotels(documentCount, DocumentSize.valueOf(documentSize)); return Flux.range(0, (int) Math.ceil(hotels.size() / 100D)) .map(i -> hotels.subList(i * 100, Math.min((i + 1) * 100, hotels.size()))) .flatMap(hotelDocuments -> searchAsyncClient.indexDocuments(new IndexDocumentsBatch<Hotel>() .addUploadActions(hotelDocuments))) .then(); }).then(Mono.defer(() -> searchAsyncClient.getDocumentCount() .delaySubscription(Duration.ofSeconds(1)) .filter(count -> count == documentCount) .repeatWhenEmpty(Flux::repeat) .then())); } }
just checking, Does the Rest API contract always guarantee a response when response code is 200 ? Here and other spots below too.
public Mono<Void> runAsync() { return searchAsyncClient.autocomplete("historic", SUGGESTER_NAME) .count() .flatMap(count -> count > 0 ? Mono.empty() : Mono.error(new RuntimeException("Expected autocomplete results."))); }
.count()
public Mono<Void> runAsync() { return searchAsyncClient.autocomplete("historic", SUGGESTER_NAME) .count() .flatMap(count -> count > 0 ? Mono.empty() : Mono.error(new RuntimeException("Expected autocomplete results."))); }
class AutocompleteTest extends ServiceTest<SearchPerfStressOptions> { public AutocompleteTest(SearchPerfStressOptions options) { super(options); } @Override public Mono<Void> globalSetupAsync() { /* * First, run the global setup in the super class. That will create the index to be used for performance * testing. Then populate the index with a given number of documents. */ return super.globalSetupAsync().then(populateIndex(options.getCount(), options.getDocumentSize())); } @Override public void run() { AtomicInteger count = new AtomicInteger(); searchClient.autocomplete("historic", SUGGESTER_NAME).iterator() .forEachRemaining(ignored -> count.incrementAndGet()); assert count.get() > 0; } @Override }
class AutocompleteTest extends ServiceTest<SearchPerfStressOptions> { public AutocompleteTest(SearchPerfStressOptions options) { super(options); } @Override public Mono<Void> globalSetupAsync() { /* * First, run the global setup in the super class. That will create the index to be used for performance * testing. Then populate the index with a given number of documents. */ return super.globalSetupAsync().then(populateIndex(options.getCount(), options.getDocumentSize())); } @Override public void run() { AtomicInteger count = new AtomicInteger(); searchClient.autocomplete("historic", SUGGESTER_NAME).iterator() .forEachRemaining(ignored -> count.incrementAndGet()); assert count.get() > 0; } @Override }
There is never a complete guarantee on the request being successful, for performance tests how should be handle the case were an operation fails? Do we prevent that iteration from being counted in the metrics?
public Mono<Void> runAsync() { return searchAsyncClient.autocomplete("historic", SUGGESTER_NAME) .count() .flatMap(count -> count > 0 ? Mono.empty() : Mono.error(new RuntimeException("Expected autocomplete results."))); }
.count()
public Mono<Void> runAsync() { return searchAsyncClient.autocomplete("historic", SUGGESTER_NAME) .count() .flatMap(count -> count > 0 ? Mono.empty() : Mono.error(new RuntimeException("Expected autocomplete results."))); }
class AutocompleteTest extends ServiceTest<SearchPerfStressOptions> { public AutocompleteTest(SearchPerfStressOptions options) { super(options); } @Override public Mono<Void> globalSetupAsync() { /* * First, run the global setup in the super class. That will create the index to be used for performance * testing. Then populate the index with a given number of documents. */ return super.globalSetupAsync().then(populateIndex(options.getCount(), options.getDocumentSize())); } @Override public void run() { AtomicInteger count = new AtomicInteger(); searchClient.autocomplete("historic", SUGGESTER_NAME).iterator() .forEachRemaining(ignored -> count.incrementAndGet()); assert count.get() > 0; } @Override }
class AutocompleteTest extends ServiceTest<SearchPerfStressOptions> { public AutocompleteTest(SearchPerfStressOptions options) { super(options); } @Override public Mono<Void> globalSetupAsync() { /* * First, run the global setup in the super class. That will create the index to be used for performance * testing. Then populate the index with a given number of documents. */ return super.globalSetupAsync().then(populateIndex(options.getCount(), options.getDocumentSize())); } @Override public void run() { AtomicInteger count = new AtomicInteger(); searchClient.autocomplete("historic", SUGGESTER_NAME).iterator() .forEachRemaining(ignored -> count.incrementAndGet()); assert count.get() > 0; } @Override }
Forgive my lack of Flux, but in the case where there's a failure state (all retries exhausted), you remove the batch sequence number, each of the event sequence numbers, and don't update state right?
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (this.enableIdempotentPartitions) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); try { publishingState.getSendingSemaphore().acquire(); } catch (InterruptedException e) { return monoError(logger, new RuntimeException(e)); } return withRetry( getSendLink(batch.getPartitionId()) .map(link -> { int seqNumber = publishingState.getSequenceNumber(); batch.setStartingPublishedSequenceNumber(seqNumber); for (EventData eventData : batch.getEvents()) { eventData.getSystemProperties().put( AmqpMessageConstant.PRODUCER_SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), seqNumber); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); } return link; }) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy ).publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }) .then(Mono.fromRunnable(() -> { publishingState.increaseSequenceNumber(batch.getCount()); })) .doFinally( signal -> publishingState.getSendingSemaphore().release() ).then(); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
.then(Mono.fromRunnable(() -> {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final Symbol PRODUCER_EPOCH = Symbol.valueOf( AmqpMessageConstant.PRODUCER_EPOCH_ANNOTATION_NAME.getValue()); private static final Symbol PRODUCER_ID = Symbol.valueOf( AmqpMessageConstant.PRODUCER_ID_ANNOTATION_NAME.getValue()); private static final Symbol PRODUCER_SEQUENCE_NUMBER = Symbol.valueOf( AmqpMessageConstant.PRODUCER_SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean enableIdempotentPartitions; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean enableIdempotentPartitions, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.enableIdempotentPartitions = enableIdempotentPartitions; if (enableIdempotentPartitions) { this.partitionPublishingStates = new HashMap<>(); if (initialPartitionPublishingStates != null) { initialPartitionPublishingStates.forEach((partitionId, state) -> { this.partitionPublishingStates.put(partitionId, new PartitionPublishingState(state)); }); } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { 
this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Indicate whether this client is an idempotent producer. * @return Whether this client is an idempotent producer. */ public boolean isIdempotentProducer() { return this.enableIdempotentPartitions; } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ public Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.just(publishingState); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. 
*/ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.enableIdempotentPartitions) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.enableIdempotentPartitions)); })); } /** * Sends a single event to the associated Event Hub. 
If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.enableIdempotentPartitions)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.enableIdempotentPartitions) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(PRODUCER_ID), (Short) properties.get(PRODUCER_EPOCH), (Integer) properties.get(PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.enableIdempotentPartitions) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && publishingState.getSequenceNumber() <= sequenceNumber) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> enableIdempotentPartitions ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Can we flatten these nested `if` statements into a single condition joined with `&&`?
/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows; a Mono that
 *     terminates with an error when {@code options} is null, when an idempotent producer is given no
 *     partition id, when both partition key and partition id are set, when the partition key is too long,
 *     or when the requested batch size exceeds the negotiated link size.
 */
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    // Flattened from a nested if: an idempotent producer publishes per-partition sequence numbers,
    // so it cannot create a batch without a concrete partition id.
    if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) {
        return monoError(logger, new IllegalArgumentException(
            "An idempotent producer can not create an EventDataBatch without partition id"));
    }

    final String partitionKey = options.getPartitionKey();
    final String partitionId = options.getPartitionId();
    final int batchMaxSize = options.getMaximumSizeInBytes();

    // Partition key and partition id are mutually exclusive routing mechanisms.
    if (!CoreUtils.isNullOrEmpty(partitionKey)
        && !CoreUtils.isNullOrEmpty(partitionId)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
            partitionKey, partitionId)));
    } else if (!CoreUtils.isNullOrEmpty(partitionKey)
        && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
            MAX_PARTITION_KEY_LENGTH)));
    }

    return getSendLink(partitionId)
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                // The AMQP link advertises its own maximum frame size; honor it.
                final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

                if (batchMaxSize > maximumLinkSize) {
                    return monoError(logger,
                        new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size"
                                + " (%s bytes).", batchMaxSize, maximumLinkSize)));
                }

                // Unset (<= 0) batch size defaults to the link maximum.
                final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;

                return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                    link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                    isIdempotentPartitionPublishing));
            }));
}
if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows; a Mono that
 *     terminates with an error when {@code options} is null, when an idempotent producer is given no
 *     partition id, when both partition key and partition id are set, when the partition key is too long,
 *     or when the requested batch size exceeds the negotiated link size.
 */
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    // An idempotent producer publishes per-partition sequence numbers, so it cannot create a
    // batch without a concrete partition id.
    if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) {
        return monoError(logger, new IllegalArgumentException(
            "An idempotent producer can not create an EventDataBatch without partition id"));
    }

    final String partitionKey = options.getPartitionKey();
    final String partitionId = options.getPartitionId();
    final int batchMaxSize = options.getMaximumSizeInBytes();

    // Partition key and partition id are mutually exclusive routing mechanisms.
    if (!CoreUtils.isNullOrEmpty(partitionKey)
        && !CoreUtils.isNullOrEmpty(partitionId)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
            partitionKey, partitionId)));
    } else if (!CoreUtils.isNullOrEmpty(partitionKey)
        && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
            MAX_PARTITION_KEY_LENGTH)));
    }

    return getSendLink(partitionId)
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                // The AMQP link advertises its own maximum frame size; honor it.
                final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

                if (batchMaxSize > maximumLinkSize) {
                    return monoError(logger,
                        new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size"
                                + " (%s bytes).", batchMaxSize, maximumLinkSize)));
                }

                // Unset (<= 0) batch size defaults to the link maximum.
                final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;

                return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                    link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                    isIdempotentPartitionPublishing));
            }));
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. 
* </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/
@Override
public void close() {
    // Idempotent close: getAndSet guarantees only the first caller runs the cleanup below.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    if (isSharedConnection) {
        // Shared connection: the owner decides when to tear it down; signal via callback.
        onClientClose.run();
    } else {
        // Dedicated connection: this client owns it, so dispose it directly.
        connectionProcessor.dispose();
    }
}

/**
 * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then
 * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into
 * {@code maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
 * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
 */
private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>,
    List<EventDataBatch>> {
    private final String partitionKey;
    private final String partitionId;
    // Maximum serialized size of a single batch, in bytes.
    private final int maxMessageSize;
    // Upper bound on batches to emit; null means unbounded.
    private final Integer maxNumberOfBatches;
    private final ErrorContextProvider contextProvider;
    private final TracerProvider tracerProvider;
    private final String entityPath;
    private final String hostname;
    private final boolean isCreatedByIdempotentProducer;

    // volatile: accumulator/finisher may observe this field from different threads of the pipeline.
    private volatile EventDataBatch currentBatch;

    EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
        ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath,
        String hostname, boolean isCreatedByIdempotentProducer) {
        this.maxNumberOfBatches = maxNumberOfBatches;
        // Fall back to the transport maximum when the options carry no explicit size.
        this.maxMessageSize = options.getMaximumSizeInBytes() > 0
            ? options.getMaximumSizeInBytes()
            : MAX_MESSAGE_LENGTH_BYTES;
        this.partitionKey = options.getPartitionKey();
        this.partitionId = options.getPartitionId();
        this.contextProvider = contextProvider;
        this.tracerProvider = tracerProvider;
        this.entityPath = entityPath;
        this.hostname = hostname;
        this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;

        currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
            tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
    }

    @Override
    public Supplier<List<EventDataBatch>> supplier() {
        return ArrayList::new;
    }

    @Override
    public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
        return (list, event) -> {
            EventDataBatch batch = currentBatch;
            // Keep filling the current batch until an event no longer fits.
            if (batch.tryAdd(event)) {
                return;
            }

            // Current batch is full; refuse to start another one past the configured limit.
            if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                final String message = String.format(Locale.US,
                    Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);

                throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                    contextProvider.getErrorContext());
            }

            // Start a fresh batch seeded with the event that did not fit, and emit the full one.
            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey,
                contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
            currentBatch.tryAdd(event);
            list.add(batch);
        };
    }

    @Override
    public BinaryOperator<List<EventDataBatch>> combiner() {
        return (existing, another) -> {
            existing.addAll(another);
            return existing;
        };
    }

    @Override
    public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
        return list -> {
            // Flush the in-progress batch, if any, so no accumulated events are dropped.
            EventDataBatch batch = currentBatch;
            currentBatch = null;

            if (batch != null) {
                list.add(batch);
            }

            return list;
        };
    }

    @Override
    public Set<Characteristics> characteristics() {
        return Collections.emptySet();
    }
}
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. 
* </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Good question. `then` won't be reached if the send has an error.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } messages.add(message); } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (this.enableIdempotentPartitions) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); try { publishingState.getSendingSemaphore().acquire(); } catch (InterruptedException e) { return monoError(logger, new RuntimeException(e)); } return withRetry( getSendLink(batch.getPartitionId()) .map(link -> { int seqNumber = publishingState.getSequenceNumber(); batch.setStartingPublishedSequenceNumber(seqNumber); for (EventData eventData : batch.getEvents()) { eventData.getSystemProperties().put( AmqpMessageConstant.PRODUCER_SEQUENCE_NUMBER_ANNOTATION_NAME.getValue(), seqNumber); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); } return link; }) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy ).publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }) .then(Mono.fromRunnable(() -> { publishingState.increaseSequenceNumber(batch.getCount()); })) .doFinally( signal -> publishingState.getSendingSemaphore().release() ).then(); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
.then(Mono.fromRunnable(() -> {
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
/**
 * An asynchronous producer responsible for transmitting {@link EventData} to an Event Hub. Events may be sent
 * to a specific partition, routed by partition key, or left for the service to distribute round-robin.
 *
 * <p>When {@code enableIdempotentPartitions} is set, the producer tracks per-partition
 * {@link PartitionPublishingState} (producer group id, owner level, sequence number) so the service can
 * de-duplicate retried sends. NOTE(review): the exact de-duplication contract lives service-side — confirm
 * against the Event Hubs idempotent-producer documentation.</p>
 */
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";
    // AMQP symbols under which the service reports idempotent-publishing state on the send link.
    private static final Symbol PRODUCER_EPOCH = Symbol.valueOf(
        AmqpMessageConstant.PRODUCER_EPOCH_ANNOTATION_NAME.getValue());
    private static final Symbol PRODUCER_ID = Symbol.valueOf(
        AmqpMessageConstant.PRODUCER_ID_ANNOTATION_NAME.getValue());
    private static final Symbol PRODUCER_SEQUENCE_NUMBER = Symbol.valueOf(
        AmqpMessageConstant.PRODUCER_SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnectionProcessor connectionProcessor;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Scheduler scheduler;
    private final boolean isSharedConnection;
    private final Runnable onClientClose;
    private final boolean enableIdempotentPartitions;
    // Per-partition publishing state; non-null only when enableIdempotentPartitions is true.
    private final Map<String, PartitionPublishingState> partitionPublishingStates;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when the batch options specify a partition id, or otherwise let the service load balance the
     * messages amongst available partitions.
     *
     * @param initialPartitionPublishingStates optional seed states for idempotent publishing; copied
     *     defensively, and ignored when {@code enableIdempotentPartitions} is {@code false}.
     */
    EventHubProducerAsyncClient(
        String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose,
        boolean enableIdempotentPartitions,
        Map<String, PartitionPublishingState> initialPartitionPublishingStates
    ) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.scheduler = scheduler;
        this.isSharedConnection = isSharedConnection;
        this.enableIdempotentPartitions = enableIdempotentPartitions;
        if (enableIdempotentPartitions) {
            this.partitionPublishingStates = new HashMap<>();
            if (initialPartitionPublishingStates != null) {
                // Copy-construct each state so callers cannot mutate our internal map entries.
                initialPartitionPublishingStates.forEach((partitionId, state) -> {
                    this.partitionPublishingStates.put(partitionId, new PartitionPublishingState(state));
                });
            }
        } else {
            this.partitionPublishingStates = null;
        }
    }

    /**
     * Convenience constructor for a non-idempotent producer; delegates with idempotent publishing disabled
     * and no initial partition states.
     */
    EventHubProducerAsyncClient(
        String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose
    ) {
        this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, tracerProvider,
            messageSerializer, scheduler, isSharedConnection, onClientClose, false, null);
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Indicate whether this client is an idempotent producer.
     *
     * @return Whether this client is an idempotent producer.
     */
    public boolean isIdempotentProducer() {
        return this.enableIdempotentPartitions;
    }

    /**
     * Get the idempotent producer's publishing state of a partition. If the locally cached state has not yet
     * been confirmed by the service, a send link is opened (with retry) so the service-reported state is
     * captured first.
     *
     * @param partitionId The partition id of the publishing state.
     * @return A mono that has the {@link PartitionPublishingState}.
     * @throws IllegalStateException if this producer isn't an idempotent producer.
     */
    public Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            // State already confirmed by the service over an AMQP link; no I/O required.
            return Mono.just(publishingState);
        } else {
            // Opening the link triggers updatePublishingState(), which refreshes the cached state.
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy);
        }
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        if (this.enableIdempotentPartitions) {
            // Idempotent publishing is tracked per partition, so an explicit partition id is mandatory.
            if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
                return monoError(logger, new IllegalArgumentException(
                    "An idempotent producer can not create an EventDataBatch without partition id"));
            }
        }
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // Partition id and partition key are mutually exclusive routing mechanisms.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey)
            && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // Fall back to the client-side maximum if the link did not report a size.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger,
                            new IllegalArgumentException(String.format(Locale.US,
                                "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size"
                                    + " (%s bytes).", batchMaxSize, maximumLinkSize)));
                    }
                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;
                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.enableIdempotentPartitions));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum
     * size allowed, an exception will be triggered and the send will fail.
     *
     * @param event Event to send to the service.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }
        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return sendInternal(events, options).publishOn(scheduler);
    }

    /**
     * Packs the event stream into one or more {@link EventDataBatch}es sized to the link, then sends them.
     *
     * @param events Events to batch and send.
     * @param options Routing options; partition id and partition key are mutually exclusive.
     * @return A {@link Mono} that completes when the batches are pushed to the service.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
        }
        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: the collector throws if events overflow a single batch.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.enableIdempotentPartitions));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch and logs (without swallowing) any send failure.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // Entity path is the hub itself for service-routed sends, or hub/Partitions/{id} for a fixed partition.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    /**
     * Refreshes the cached publishing state of a partition from the remote properties the service attached to
     * a freshly opened send link. No-op passthrough for non-idempotent producers.
     */
    private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
        if (this.enableIdempotentPartitions) {
            return amqpSendLink.getRemoteProperties().map(properties -> {
                this.setPartitionPublishingState(
                    partitionId,
                    (Long) properties.get(PRODUCER_ID),
                    (Short) properties.get(PRODUCER_EPOCH),
                    (Integer) properties.get(PRODUCER_SEQUENCE_NUMBER)
                );
                return amqpSendLink;
            });
        } else {
            return Mono.just(amqpSendLink);
        }
    }

    /**
     * Get the idempotent producer's publishing state of a partition from the client side maintained state.
     * It doesn't create a link to get state from the service. Creates and caches a fresh state on first use
     * of a partition (double-checked under {@code synchronized(this)}).
     *
     * @throws IllegalStateException if the producer is not an idempotent producer.
     */
    private PartitionPublishingState getClientPartitionPublishingState(String partitionId) {
        if (!this.enableIdempotentPartitions) {
            throw logger.logExceptionAsWarning(
                new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer"
                    + " is not an idempotent producer."));
        }
        if (this.partitionPublishingStates.containsKey(partitionId)) {
            return this.partitionPublishingStates.get(partitionId);
        } else {
            synchronized (this) {
                // Re-check under the lock in case another thread created the state first.
                if (this.partitionPublishingStates.containsKey(partitionId)) {
                    return this.partitionPublishingStates.get(partitionId);
                }
                PartitionPublishingState state = new PartitionPublishingState();
                this.partitionPublishingStates.put(partitionId, state);
                return state;
            }
        }
    }

    /**
     * Overwrites the cached publishing state with service-reported values, but only when the reported
     * sequence number is not behind the local one, and marks the state as link-confirmed.
     */
    private void setPartitionPublishingState(
        String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState != null && publishingState.getSequenceNumber() <= sequenceNumber) {
            publishingState.setOwnerLevel(ownerLevel);
            publishingState.setProducerGroupId(producerGroupId);
            publishingState.setSequenceNumber(sequenceNumber);
            publishingState.setFromLink(true);
        }
    }

    /**
     * Obtains (or reuses, via the connection processor) the AMQP send link for the target partition.
     * Idempotent producers open the link with their cached publishing state and then refresh that state from
     * the link's remote properties.
     */
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final String linkName = getEntityPath(partitionId);
        return connectionProcessor
            .flatMap(connection -> enableIdempotentPartitions
                ? connection.createSendLink(
                    linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId))
                : connection.createSendLink(
                    linkName, entityPath, retryOptions))
            .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink));
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the
     * underlying connection is also closed; a shared connection is handed back via {@code onClientClose}.
     * Idempotent on repeated calls.
     */
    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }
        if (isSharedConnection) {
            onClientClose.run();
        } else {
            connectionProcessor.dispose();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is
     * {@code null} then it'll collect as many batches as possible. Otherwise, if there are more events than
     * can fit into {@code maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final String entityPath;
        private final String hostname;
        private final boolean isCreatedByIdempotentProducer;

        // The batch currently being filled; volatile because Collector functions may run on different threads.
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath,
            String hostname, boolean isCreatedByIdempotentProducer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.entityPath = entityPath;
            this.hostname = hostname;
            this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;
            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }
                // Current batch is full; fail if rolling over would exceed the allowed batch count.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                // Start a new batch with the overflowing event and archive the full one.
                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch, if any, into the result.
                EventDataBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Consistent use of `this` when referencing instance members.
/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of the events
 * exceeds the maximum size of a single batch, an exception is triggered and the send fails. By default,
 * the message size is the maximum amount allowed on the link.
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 */
Mono<Void> send(Flux<EventData> events, SendOptions options) {
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    } else if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) {
        // An idempotent producer maintains per-partition publishing state, so a concrete
        // partition id is required up front.
        return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
            + "because this producer client is an idempotent producer"));
    }
    // publishOn moves downstream signals off the AMQP I/O threads onto the client scheduler.
    return sendInternal(events, options).publishOn(scheduler);
}
} else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
/**
 * Sends a set of events to the associated Event Hub using a batched approach. Sending fails with an
 * exception when the events exceed the maximum size of a single batch; by default the batch size is
 * the maximum amount allowed on the link.
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 */
Mono<Void> send(Flux<EventData> events, SendOptions options) {
    // Validate arguments up front, surfacing failures as error Monos rather than thrown exceptions.
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    }
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    // Idempotent publishing tracks state per partition, so the caller must pick one explicitly.
    final boolean missingRequiredPartitionId =
        isIdempotentPartitionPublishing && options.getPartitionId() == null;
    if (missingRequiredPartitionId) {
        return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
            + "because this producer client is an idempotent producer"));
    }
    return sendInternal(events, options).publishOn(scheduler);
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Does this part need to be in a Mono? It seems synchronous to me.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
publishingState.getSemaphore().acquireUninterruptibly();
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Since we're synchronising on the entire instance (`synchronized (this)` locks the whole client, not just this map)... is it possible to synchronise only on the parts that need it — e.g. lock the `partitionPublishingStates` map itself?
/**
 * Returns the client-side idempotent-publishing state for {@code partitionId}, creating and
 * caching an empty state on first access. This reads only the locally maintained map; it does
 * not open a link or contact the service.
 *
 * @param partitionId the partition whose publishing state is requested.
 * @return the cached (or newly created) {@link PartitionPublishingState}; never {@code null}.
 * @throws IllegalStateException if this producer was not created as an idempotent producer.
 */
private PartitionPublishingState getClientPartitionPublishingState(String partitionId) {
    if (!this.isIdempotentPartitionPublishing) {
        throw logger.logExceptionAsWarning(
            new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer"
                + " is not an idempotent producer."));
    }
    // partitionPublishingStates is a plain HashMap, so every access -- reads included -- must
    // happen under the same lock: an unsynchronized containsKey/get racing with a put is a data
    // race under the Java Memory Model. Lock the map itself rather than 'this' so the critical
    // section stays narrow and external code holding the client's monitor cannot interfere.
    synchronized (this.partitionPublishingStates) {
        PartitionPublishingState state = this.partitionPublishingStates.get(partitionId);
        if (state == null) {
            state = new PartitionPublishingState();
            this.partitionPublishingStates.put(partitionId, state);
        }
        return state;
    }
}
synchronized (this) {
/**
 * Returns the client-side idempotent-publishing state for {@code partitionId}, creating and
 * caching an empty state on first access. Reads only the locally maintained map; it does not
 * create a link or query the service.
 *
 * @param partitionId the partition whose publishing state is requested.
 * @return the cached (or newly created) {@link PartitionPublishingState}; never {@code null}.
 * @throws IllegalStateException if this producer was not created as an idempotent producer.
 */
private PartitionPublishingState getClientPartitionPublishingState(String partitionId) {
    if (!isIdempotentPartitionPublishing) {
        throw logger.logExceptionAsWarning(
            new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer"
                + " is not an idempotent producer."));
    }
    // The backing map is a plain HashMap, so reads must hold the same lock as writes: the
    // former fast-path containsKey/get outside the synchronized block was a data race against
    // concurrent puts. computeIfAbsent under the lock also removes the double lookup.
    synchronized (partitionPublishingStates) {
        return partitionPublishingStates.computeIfAbsent(partitionId, id -> new PartitionPublishingState());
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
    // Management-plane call: resolves the shared management node, then queries the partition's runtime info.
    return connectionProcessor.flatMap(connection -> connection.getManagementNode())
        .flatMap(node -> node.getPartitionProperties(partitionId));
}

/**
 * Gets the idempotent producer's publishing properties for a partition.
 *
 * <p>If the client-side state was already populated from an AMQP link ({@code isFromLink()}), it is returned
 * without any network call; otherwise a send link is opened (with retry) purely so that
 * {@code updatePublishingState} refreshes the cached state, which is then re-read and mapped.</p>
 *
 * @param partitionId The partition id of the publishing state.
 * @return A mono that emits the {@link PartitionPublishingProperties} for the partition.
 *     (NOTE(review): the original doc said "{@code null} if the partition doesn't have any state yet", but a
 *     {@link Mono} cannot emit null — presumably it completes empty or emits a default-valued state; confirm.)
 * @throws UnsupportedOperationException if this producer isn't an idempotent producer.
 *     (NOTE(review): not visible in this method body — presumably thrown by
 *     {@code getClientPartitionPublishingState}; confirm.)
 */
Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) {
    PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
    if (publishingState.isFromLink()) {
        // Defer so the snapshot is taken at subscription time, not at assembly time.
        return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties()));
    } else {
        // Opening the link is what populates the client-side state; the link itself is discarded and the
        // refreshed cached state is re-read afterwards.
        return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
            Mono.just(this.getClientPartitionPublishingState(partitionId))),
            retryOptions.getTryTimeout(), retryPolicy).map(
            PartitionPublishingState::toPartitionPublishingProperties);
    }
}

/**
 * Gets the idempotent producer's raw publishing state for a partition, using the same
 * "cached if link-sourced, otherwise open a link to populate it" strategy as
 * {@code getPartitionPublishingProperties(String)}.
 *
 * @param partitionId The partition id of the publishing state.
 * @return A mono that emits the {@link PartitionPublishingState} for the partition.
 * @throws UnsupportedOperationException if this producer isn't an idempotent producer.
 *     (NOTE(review): presumably raised inside {@code getClientPartitionPublishingState}; confirm.)
 */
Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) {
    PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
    if (publishingState.isFromLink()) {
        return Mono.defer(() -> Mono.just(publishingState));
    } else {
        return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
            Mono.just(this.getClientPartitionPublishingState(partitionId))),
            retryOptions.getTryTimeout(), retryPolicy);
    }
}

/**
 * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
 *
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 */
public Mono<EventDataBatch> createBatch() {
    return createBatch(DEFAULT_BATCH_OPTIONS);
}

/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 * @throws NullPointerException if {@code options} is null.
 */
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    // Idempotent publishing tracks sequence numbers per partition, so a concrete partition id is mandatory.
    if (this.isIdempotentPartitionPublishing) {
        if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
            return monoError(logger, new IllegalArgumentException(
                "An idempotent producer can not create an EventDataBatch without partition id"));
        }
    }
    final String partitionKey = options.getPartitionKey();
    final String partitionId = options.getPartitionId();
    final int batchMaxSize = options.getMaximumSizeInBytes();
    // partitionKey and partitionId are mutually exclusive routing mechanisms.
    if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
            partitionKey, partitionId)));
    } else if (!CoreUtils.isNullOrEmpty(partitionKey)
        && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
            MAX_PARTITION_KEY_LENGTH)));
    }
    return getSendLink(partitionId)
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                // Fall back to the protocol maximum if the link does not report a size.
                final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                if (batchMaxSize > maximumLinkSize) {
                    return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                        "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                        batchMaxSize, maximumLinkSize)));
                }
                final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;
                return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                    link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                    this.isIdempotentPartitionPublishing));
            }));
}

/**
 * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
 * allowed, an exception will be triggered and the send will fail.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param event Event to send to the service.
 * @return A {@link Mono} that completes when the event is pushed to the service.
 */
Mono<Void> send(EventData event) {
    if (event == null) {
        return monoError(logger, new NullPointerException("'event' cannot be null."));
    }
    return send(Flux.just(event));
}

/**
 * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
 * the maximum size allowed, an exception will be triggered and the send will fail.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param event Event to send to the service.
 * @param options The set of options to consider when sending this event.
 * @return A {@link Mono} that completes when the event is pushed to the service.
 */
Mono<Void> send(EventData event, SendOptions options) {
    if (event == null) {
        return monoError(logger, new NullPointerException("'event' cannot be null."));
    } else if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    return send(Flux.just(event), options);
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
 * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
 * size is the max amount allowed on the link.
 *
 * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send#Iterable}
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param events Events to send to the service.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
 */
public Mono<Void> send(Iterable<EventData> events) {
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    }
    return send(Flux.fromIterable(events));
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
 * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
 * size is the max amount allowed on the link.
 *
 * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send#Iterable-SendOptions}
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
 */
public Mono<Void> send(Iterable<EventData> events, SendOptions options) {
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    } else if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    return send(Flux.fromIterable(events), options);
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
 * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
 * size is the max amount allowed on the link.
 *
 * @param events Events to send to the service.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 */
Mono<Void> send(Flux<EventData> events) {
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    }
    return send(events, DEFAULT_SEND_OPTIONS);
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
 * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
 * size is the max amount allowed on the link.
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @return A {@link Mono} that completes when all events are pushed to the service.
 */
Mono<Void> send(Flux<EventData> events, SendOptions options) {
    if (events == null) {
        return monoError(logger, new NullPointerException("'events' cannot be null."));
    } else if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
        // Idempotent publishing requires an explicit partition: sequence numbers are tracked per partition.
        return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
            + "because this producer client is an idempotent producer"));
    }
    return sendInternal(events, options).publishOn(scheduler);
}

/**
 * Sends the batch to the associated Event Hub.
 *
 * <p>For an idempotent producer, the partition's sequence-number state is stamped onto each event under a
 * per-partition semaphore, the AMQP messages are built inside that critical section, and the client-side state
 * is committed only after the send pipeline completes.</p>
 *
 * @param batch The batch to send to the service.
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 * @throws NullPointerException if {@code batch} is {@code null}.
 * @see EventHubProducerAsyncClient#createBatch()
 * @see EventHubProducerAsyncClient#createBatch(CreateBatchOptions)
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // An empty batch is a no-op, not an error.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }
    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(),
            batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
            batch.getCount());
    }
    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // AtomicReference because the tracing context is mutated from inside lambdas below.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();
    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span that all events link to.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        // For idempotent publishing, serialization is deferred until the sequence numbers are stamped
        // (inside the semaphore-guarded runnable below).
        if (!isIdempotentPartitionPublishing) {
            messages.add(this.createMessageFromEvent(event, partitionKey));
        }
    }
    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }
    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState =
            this.getClientPartitionPublishingState(batch.getPartitionId());
        // Phase 1 (at subscribe time): take the per-partition semaphore, stamp producer-group/sequence/owner
        // metadata onto every event, and serialize them. Phase 2: send with retry. Phase 3 (thenEmpty): on
        // success, commit the stamped values into the events and advance the cached sequence number.
        // doFinally always releases the semaphore (success, error, or cancel).
        // NOTE(review): if the subscription is cancelled before the fromRunnable executes, doFinally still
        // releases a semaphore that was never acquired — confirm whether upstream guarantees prevent this.
        return Mono.fromRunnable(() -> {
            publishingState.getSemaphore().acquireUninterruptibly();
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                messages.add(this.createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(link -> messages.size() == 1
                    ? link.send(messages.get(0))
                    : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy
            )).publishOn(scheduler).doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            }).thenEmpty(Mono.fromRunnable(() -> {
                // Record the first sequence number of this batch before advancing the cached state.
                batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                for (EventData eventData : batch.getEvents()) {
                    eventData.commitProducerDataFromSysProperties();
                }
                publishingState.increaseSequenceNumber(batch.getCount());
            })).doFinally(
                signalType -> {
                    publishingState.getSemaphore().release();
                }
            );
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}

/**
 * Serializes an event into an AMQP message, adding the partition-key message annotation when a partition key
 * is in use.
 */
private Message createMessageFromEvent(EventData event, String partitionKey) {
    final Message message = messageSerializer.serialize(event);
    if (!CoreUtils.isNullOrEmpty(partitionKey)) {
        final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
            ? new MessageAnnotations(new HashMap<>())
            : message.getMessageAnnotations();
        messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
        message.setMessageAnnotations(messageAnnotations);
    }
    return message;
}

/**
 * Packs the event stream into at most one {@link EventDataBatch} sized to the link, then delegates to
 * {@code sendInternal(Flux<EventDataBatch>)}.
 */
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
    final String partitionKey = options.getPartitionKey();
    final String partitionId = options.getPartitionId();
    if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
    }
    return getSendLink(options.getPartitionId())
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                final CreateBatchOptions batchOptions = new CreateBatchOptions()
                    .setPartitionKey(options.getPartitionKey())
                    .setPartitionId(options.getPartitionId())
                    .setMaximumSizeInBytes(batchSize);
                // maxNumberOfBatches = 1: everything must fit in a single batch or the collector throws.
                return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                    tracerProvider, link.getEntityPath(), link.getHostname(),
                    this.isIdempotentPartitionPublishing));
            })
            .flatMap(list -> sendInternal(Flux.fromIterable(list))));
}

/** Sends each batch via {@link #send(EventDataBatch)}, logging (but not transforming) any failure. */
private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
    return eventBatches
        .flatMap(this::send)
        .then()
        .doOnError(error -> {
            logger.error(Messages.ERROR_SENDING_BATCH, error);
        });
}

/** Entity path for the Event Hub, or the partition sub-path when a partition id is given. */
private String getEntityPath(String partitionId) {
    return CoreUtils.isNullOrEmpty(partitionId)
        ? eventHubName
        : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
}

/**
 * For idempotent producers, refreshes the cached publishing state from the link's remote properties
 * (producer id / epoch / sequence number) before handing the link back; otherwise passes the link through.
 */
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
    if (this.isIdempotentPartitionPublishing) {
        return amqpSendLink.getRemoteProperties().map(properties -> {
            this.setPartitionPublishingState(
                partitionId,
                (Long) properties.get(SymbolConstants.PRODUCER_ID),
                (Short) properties.get(SymbolConstants.PRODUCER_EPOCH),
                (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER)
            );
            return amqpSendLink;
        });
    } else {
        return Mono.just(amqpSendLink);
    }
}

/**
 * Overwrites the cached publishing state for a partition with values reported by the service, but only when
 * the cached sequence number is unset or not ahead of the reported one (never move the state backwards).
 * Marks the state as link-sourced so subsequent reads can use the cache without opening a link.
 */
private void setPartitionPublishingState(
    String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) {
    PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
    if (publishingState != null && (publishingState.getSequenceNumber() == null
        || publishingState.getSequenceNumber() <= sequenceNumber)) {
        publishingState.setOwnerLevel(ownerLevel);
        publishingState.setProducerGroupId(producerGroupId);
        publishingState.setSequenceNumber(sequenceNumber);
        publishingState.setFromLink(true);
    }
}

/**
 * Obtains (or creates) the send link for a partition. Idempotent producers open the link with their cached
 * publishing state, and every link is passed through {@code updatePublishingState} to refresh that cache.
 */
private Mono<AmqpSendLink> getSendLink(String partitionId) {
    final String entityPath = getEntityPath(partitionId);
    // Link name mirrors the entity path (one named link per destination).
    final String linkName = getEntityPath(partitionId);
    return connectionProcessor
        .flatMap(connection -> isIdempotentPartitionPublishing
            ? connection.createSendLink(
                linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId))
            : connection.createSendLink(
                linkName, entityPath, retryOptions))
        .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink));
}

/**
 * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
 * connection is also closed.
 */
@Override
public void close() {
    // Idempotent: only the first call tears anything down.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    if (isSharedConnection) {
        // Shared connection is reference-counted elsewhere; just signal this client is done with it.
        onClientClose.run();
    } else {
        connectionProcessor.dispose();
    }
}

/**
 * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null}
 * then it'll collect as many batches as possible. Otherwise, if there are more events than can fit into
 * {@code maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
 * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
 */
private static class EventDataCollector
    implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
    private final String partitionKey;
    private final String partitionId;
    private final int maxMessageSize;
    // null means "unbounded number of batches".
    private final Integer maxNumberOfBatches;
    private final ErrorContextProvider contextProvider;
    private final TracerProvider tracerProvider;
    private final String entityPath;
    private final String hostname;
    private final boolean isCreatedByIdempotentProducer;

    // The batch currently being filled; volatile because Collector methods may run on different threads.
    private volatile EventDataBatch currentBatch;

    EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
        ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath,
        String hostname, boolean isCreatedByIdempotentProducer) {
        this.maxNumberOfBatches = maxNumberOfBatches;
        this.maxMessageSize = options.getMaximumSizeInBytes() > 0
            ? options.getMaximumSizeInBytes()
            : MAX_MESSAGE_LENGTH_BYTES;
        this.partitionKey = options.getPartitionKey();
        this.partitionId = options.getPartitionId();
        this.contextProvider = contextProvider;
        this.tracerProvider = tracerProvider;
        this.entityPath = entityPath;
        this.hostname = hostname;
        this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;

        currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
            tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
    }

    @Override
    public Supplier<List<EventDataBatch>> supplier() {
        return ArrayList::new;
    }

    @Override
    public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
        return (list, event) -> {
            EventDataBatch batch = currentBatch;
            if (batch.tryAdd(event)) {
                return;
            }
            // Current batch is full; starting another would exceed the allowed batch count.
            if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                final String message = String.format(Locale.US,
                    Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);
                throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                    contextProvider.getErrorContext());
            }
            // Roll over: seal the full batch into the list and start a new one with this event.
            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
            currentBatch.tryAdd(event);
            list.add(batch);
        };
    }

    @Override
    public BinaryOperator<List<EventDataBatch>> combiner() {
        return (existing, another) -> {
            existing.addAll(another);
            return existing;
        };
    }

    @Override
    public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
        return list -> {
            // Flush the (possibly partially filled) in-progress batch.
            EventDataBatch batch = currentBatch;
            currentBatch = null;
            if (batch != null) {
                list.add(batch);
            }
            return list;
        };
    }

    @Override
    public Set<Characteristics> characteristics() {
        return Collections.emptySet();
    }
}
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
We don't need `this.`; it's implied, because no other variable in this scope shares the name.
/**
 * For idempotent producers, refreshes the client-side publishing state from the link's remote
 * properties before handing the link back; otherwise passes the link through unchanged.
 *
 * @param partitionId Identifier of the partition whose state is refreshed.
 * @param amqpSendLink The send link whose remote properties carry the producer state.
 * @return A {@link Mono} emitting the same {@code amqpSendLink}.
 */
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
    // No `this.` qualifiers: nothing in this scope shadows the fields/methods, and the rest of the
    // file reads these constants from ClientConstants.
    if (isIdempotentPartitionPublishing) {
        return amqpSendLink.getRemoteProperties().map(properties -> {
            setPartitionPublishingState(
                partitionId,
                (Long) properties.get(ClientConstants.PRODUCER_ID),
                (Short) properties.get(ClientConstants.PRODUCER_EPOCH),
                (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER)
            );
            return amqpSendLink;
        });
    } else {
        return Mono.just(amqpSendLink);
    }
}
if (this.isIdempotentPartitionPublishing) {
/**
 * Hands the send link back to the caller; for idempotent producers this first pulls the producer
 * state (group id, epoch, sequence number) off the link's remote properties and records it.
 *
 * @param partitionId Identifier of the partition whose state is refreshed.
 * @param amqpSendLink The send link whose remote properties carry the producer state.
 * @return A {@link Mono} emitting the same {@code amqpSendLink}.
 */
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
    if (!isIdempotentPartitionPublishing) {
        // Non-idempotent producers carry no per-partition state; nothing to record.
        return Mono.just(amqpSendLink);
    }

    return amqpSendLink.getRemoteProperties().map(remoteProperties -> {
        setPartitionPublishingState(
            partitionId,
            (Long) remoteProperties.get(ClientConstants.PRODUCER_ID),
            (Short) remoteProperties.get(ClientConstants.PRODUCER_EPOCH),
            (Integer) remoteProperties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER)
        );
        return amqpSendLink;
    });
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Same with other usages of `this`
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } }
if (this.isIdempotentPartitionPublishing) {
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
@srnagar I'm torn on using a semaphore for this. thoughts?
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
publishingState.getSemaphore().acquireUninterruptibly();
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
/**
 * An asynchronous producer client that publishes {@link EventData} to an Event Hub — to a specific
 * partition, with a partition key, or round-robin across partitions. Optionally supports
 * idempotent partition publishing, where client-side per-partition publishing state (producer
 * group id, owner level, next sequence number) is tracked and stamped onto outgoing events.
 */
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";
    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnectionProcessor connectionProcessor;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Scheduler scheduler;
    private final boolean isSharedConnection;
    private final Runnable onClientClose;
    // True when this producer performs idempotent partition publishing.
    private final boolean isIdempotentPartitionPublishing;
    // Per-partition publishing state; populated lazily. Null when idempotent publishing is off.
    private final Map<String, PartitionPublishingState> partitionPublishingStates;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single
     * partition when a partition id is set in {@link CreateBatchOptions}, or can load balance the messages
     * amongst available partitions.
     */
    EventHubProducerAsyncClient(
        String fullyQualifiedNamespace, String eventHubName,
        EventHubConnectionProcessor connectionProcessor,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose,
        boolean isIdempotentPartitionPublishing,
        Map<String, PartitionPublishingState> initialPartitionPublishingStates
    ) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.scheduler = scheduler;
        this.isSharedConnection = isSharedConnection;
        this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing;
        if (isIdempotentPartitionPublishing) {
            // Caller may seed publishing states (e.g. to resume a previous producer's sequence).
            if (initialPartitionPublishingStates == null) {
                this.partitionPublishingStates = new HashMap<>();
            } else {
                this.partitionPublishingStates = initialPartitionPublishingStates;
            }
        } else {
            this.partitionPublishingStates = null;
        }
    }

    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName,
        EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler,
        boolean isSharedConnection, Runnable onClientClose
    ) {
        // Convenience constructor: idempotent partition publishing disabled.
        this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose,
            false, null);
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their
     * identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     * @return The set of information for the requested partition under the Event Hub this client is
     *     associated with.
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Get the idempotent producer's publishing state of a partition, as {@link PartitionPublishingProperties}.
     * If the client-side state has not yet been initialized from a link, a send link is opened (with retry)
     * to populate it from the service.
     *
     * @param partitionId The partition id of the publishing state.
     * @return A mono that has the {@link PartitionPublishingProperties}.
     * @throws IllegalStateException if this producer isn't an idempotent producer (thrown from
     *     {@link #getClientPartitionPublishingState(String)}).
     */
    Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            // State already synchronized with the service; defer so the snapshot is taken at
            // subscription time.
            return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties()));
        } else {
            // Opening the send link populates the state as a side effect (see updatePublishingState).
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy).map(
                    PartitionPublishingState::toPartitionPublishingProperties);
        }
    }

    /**
     * Get the idempotent producer's publishing state of a partition. If the client-side state has not yet
     * been initialized from a link, a send link is opened (with retry) to populate it from the service.
     *
     * @param partitionId The partition id of the publishing state.
     * @return A mono that has the {@link PartitionPublishingState}.
     * @throws IllegalStateException if this producer isn't an idempotent producer (thrown from
     *     {@link #getClientPartitionPublishingState(String)}).
     */
    Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            return Mono.defer(() -> Mono.just(publishingState));
        } else {
            // Opening the send link populates the state as a side effect (see updatePublishingState).
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy);
        }
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        if (this.isIdempotentPartitionPublishing) {
            // Idempotent publishing tracks sequence numbers per partition, so a concrete
            // partition id is mandatory.
            if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
                return monoError(logger, new IllegalArgumentException(
                    "An idempotent producer can not create an EventDataBatch without partition id"));
            }
        }
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey)
            && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // The link may report a non-positive size before negotiation completes;
                    // fall back to the protocol maximum.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger,
                            new IllegalArgumentException(String.format(Locale.US,
                                "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                                batchMaxSize, maximumLinkSize)));
                    }
                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;
                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum
     * size allowed, an exception will be triggered and the send will fail.
     *
     * <p>For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and
     * limits documentation.</p>
     *
     * @param event Event to send to the service.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }
        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and
     * limits documentation.</p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * <p>For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and
     * limits documentation.</p>
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * <p>For more information regarding the maximum event size allowed, see the Azure Event Hubs quotas and
     * limits documentation.</p>
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed
     * the maximum size of a single batch, an exception will be triggered and the send will fail. By default,
     * the message size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
            return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
                + "because this producer client is an idempotent producer"));
        }
        return sendInternal(events, options).publishOn(scheduler);
    }

    /**
     * Serializes an {@link EventData} into an AMQP {@link Message}, attaching the partition key as a message
     * annotation when one is supplied.
     *
     * @param event The event to serialize.
     * @param partitionKey Optional partition key to stamp into the message annotations.
     * @return The serialized AMQP message.
     */
    private Message createMessageFromEvent(EventData event, String partitionKey) {
        final Message message = messageSerializer.serialize(event);
        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        return message;
    }

    // Collects the event stream into at most one EventDataBatch sized to the link, then sends it.
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: everything must fit into a single batch or the
                    // collector throws LINK_PAYLOAD_SIZE_EXCEEDED.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    // Sends each collected batch and logs (then propagates) any send failure.
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    // Partition-specific entity path when a partition id is given; the Event Hub path otherwise.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    /**
     * For an idempotent producer, reads the producer id/epoch/sequence-number properties negotiated on the
     * remote link and folds them into the client-side publishing state for {@code partitionId}. A pass-through
     * for a non-idempotent producer.
     */
    private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
        if (this.isIdempotentPartitionPublishing) {
            return amqpSendLink.getRemoteProperties().map(properties -> {
                this.setPartitionPublishingState(
                    partitionId,
                    (Long) properties.get(SymbolConstants.PRODUCER_ID),
                    (Short) properties.get(SymbolConstants.PRODUCER_EPOCH),
                    (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER)
                );
                return amqpSendLink;
            });
        } else {
            return Mono.just(amqpSendLink);
        }
    }

    /**
     * Get the idempotent producer's publishing state of a partition from the client side maintained state.
     * It doesn't create a link to get state from the service. Lazily creates an empty state on first access
     * for a partition (double-checked under this client's monitor).
     *
     * @throws IllegalStateException if this producer is not an idempotent producer.
     */
    private PartitionPublishingState getClientPartitionPublishingState(String partitionId) {
        if (!this.isIdempotentPartitionPublishing) {
            throw logger.logExceptionAsWarning(
                new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer"
                    + " is not an idempotent producer."));
        }
        if (this.partitionPublishingStates.containsKey(partitionId)) {
            return this.partitionPublishingStates.get(partitionId);
        } else {
            synchronized (this) {
                // Re-check under the lock in case another thread created the state first.
                if (this.partitionPublishingStates.containsKey(partitionId)) {
                    return this.partitionPublishingStates.get(partitionId);
                }
                PartitionPublishingState state = new PartitionPublishingState();
                this.partitionPublishingStates.put(partitionId, state);
                return state;
            }
        }
    }

    // Updates the cached publishing state from link-negotiated values; only moves the sequence
    // number forward (or sets it when unset) so a stale link can't rewind the state.
    private void setPartitionPublishingState(
        String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState != null
            && (publishingState.getSequenceNumber() == null
                || publishingState.getSequenceNumber() <= sequenceNumber)) {
            publishingState.setOwnerLevel(ownerLevel);
            publishingState.setProducerGroupId(producerGroupId);
            publishingState.setSequenceNumber(sequenceNumber);
            publishingState.setFromLink(true);
        }
    }

    // Obtains (or creates) the send link for the partition, then syncs publishing state from it.
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        // The link is named after the entity path it targets.
        final String linkName = getEntityPath(partitionId);

        return connectionProcessor
            .flatMap(connection -> isIdempotentPartitionPublishing
                ? connection.createSendLink(
                    linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId))
                : connection.createSendLink(
                    linkName, entityPath, retryOptions))
            .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink));
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the
     * underlying connection is also closed.
     */
    @Override
    public void close() {
        // Idempotent close: only the first call does any work.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        if (isSharedConnection) {
            // The connection is shared with other clients; notify the owner instead of disposing.
            onClientClose.run();
        } else {
            connectionProcessor.dispose();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is
     * {@code null} then it'll collect as many batches as possible. Otherwise, if there are more events than
     * can fit into {@code maxNumberOfBatches}, then the collector throws a {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final String entityPath;
        private final String hostname;
        private final boolean isCreatedByIdempotentProducer;

        // The batch currently being filled; swapped out when full, nulled by the finisher.
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath,
            String hostname, boolean isCreatedByIdempotentProducer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.entityPath = entityPath;
            this.hostname = hostname;
            this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }

                // The current batch is full; fail if starting another would exceed the cap.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }

                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch, if any.
                EventDataBatch batch = currentBatch;
                currentBatch = null;

                if (batch != null) {
                    list.add(batch);
                }

                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
The requirement is for a combination of a producer group and a partition, the send should be sequential. No concurrency is allowed because the service will check the producer sequence number assigned by the client. I spent long time in searching for a replacement of Semaphore for this use case. Any suggestions?
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
publishingState.getSemaphore().acquireUninterruptibly();
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Done
/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 * @throws NullPointerException if {@code options} is null.
 */
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }

    // An idempotent producer tracks publishing state per partition, so it cannot build a batch
    // without a concrete partition id. (Single guard instead of a nested if.)
    if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) {
        return monoError(logger, new IllegalArgumentException(
            "An idempotent producer can not create an EventDataBatch without partition id"));
    }

    final String partitionKey = options.getPartitionKey();
    final String partitionId = options.getPartitionId();
    final int batchMaxSize = options.getMaximumSizeInBytes();

    // Partition key and partition id are mutually exclusive routing options.
    if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
            partitionKey, partitionId)));
    } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
            MAX_PARTITION_KEY_LENGTH)));
    }

    return getSendLink(partitionId)
        .flatMap(link -> link.getLinkSize()
            .flatMap(size -> {
                // The service-reported link size wins; fall back to the client-side maximum.
                final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;

                if (batchMaxSize > maximumLinkSize) {
                    return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                        "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                        batchMaxSize, maximumLinkSize)));
                }

                final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;

                return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                    link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                    isIdempotentPartitionPublishing));
            }));
}
if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 * @throws NullPointerException if {@code options} is null.
 */
public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
    if (options == null) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    }
    // Idempotent publishing keeps per-partition state, so the partition id is mandatory here.
    if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) {
        return monoError(logger, new IllegalArgumentException(
            "An idempotent producer can not create an EventDataBatch without partition id"));
    }

    final String key = options.getPartitionKey();
    final String id = options.getPartitionId();
    final int requestedMaximumSize = options.getMaximumSizeInBytes();
    final boolean hasKey = !CoreUtils.isNullOrEmpty(key);

    // A batch may target either a partition key or a partition id, never both.
    if (hasKey && !CoreUtils.isNullOrEmpty(id)) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'", key, id)));
    }
    if (hasKey && key.length() > MAX_PARTITION_KEY_LENGTH) {
        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
            "Partition key '%s' exceeds the maximum allowed length: '%s'.", key, MAX_PARTITION_KEY_LENGTH)));
    }

    return getSendLink(id).flatMap(link -> link.getLinkSize().flatMap(reportedSize -> {
        // Prefer the size negotiated on the AMQP link; otherwise use the client-side ceiling.
        final int linkLimit = reportedSize > 0 ? reportedSize : MAX_MESSAGE_LENGTH_BYTES;

        if (requestedMaximumSize > linkLimit) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                requestedMaximumSize, linkLimit)));
        }

        final int effectiveSize = requestedMaximumSize > 0 ? requestedMaximumSize : linkLimit;
        return Mono.just(new EventDataBatch(effectiveSize, id, key, link::getErrorContext, tracerProvider,
            link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing));
    }));
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. 
* </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. 
* </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. 
* @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I actually prefer using `this` because it conveys that the variable is an instance variable rather than a local. Anyway, to stay consistent with the rest of the code, I removed the `this`.
Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); }
} else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. 
* @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Yes — without wrapping it in a Mono, the semaphore would be acquired eagerly at assembly time (i.e. when send() is called) instead of at subscription time, which could cause problems.
/**
 * Sends the batch to the associated Event Hub.
 *
 * <p>When the producer was created for idempotent partition publishing, the events in the batch are stamped
 * with the partition's publishing state (producer group id, owner level, published sequence number) before
 * being sent, and the client-side state is committed only after the send succeeds. The per-partition
 * semaphore serializes concurrent sends; it is acquired lazily inside {@code Mono.fromRunnable} — at
 * subscription time, not when this method is called.</p>
 *
 * @param batch The batch to send to the service.
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 * @throws NullPointerException if {@code batch} is {@code null}.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // An empty batch is not an error; there is simply nothing to send.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(),
            batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
            batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Only allocated (and only dereferenced) when tracing is enabled.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE)
        : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // For idempotent publishing, message creation is deferred until the publishing state has
            // been stamped onto the events (see the Mono.fromRunnable below).
            messages.add(createMessageFromEvent(event, partitionKey));
        }
    }

    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }

    if (isIdempotentPartitionPublishing) {
        final PartitionPublishingState publishingState =
            getClientPartitionPublishingState(batch.getPartitionId());
        // NOTE(review): 'messages' is captured and mutated by the runnable below, so re-subscribing the
        // returned Mono would append duplicate messages — confirm callers subscribe at most once.
        return Mono.fromRunnable(() -> {
            // Acquired at subscription time (not at assembly); released in doFinally below.
            publishingState.getSemaphore().acquireUninterruptibly();
            // NOTE(review): getSequenceNumber() appears nullable before link state arrives (see the null
            // check in setPartitionPublishingState) — confirm unboxing here cannot NPE.
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                messages.add(createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(link -> messages.size() == 1
                    ? link.send(messages.get(0))
                    : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy)
        ).publishOn(scheduler).doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        }).thenEmpty(Mono.fromRunnable(() -> {
            // Reached only on successful completion: commit the published sequence numbers to the events
            // and advance the client-side partition state.
            batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
            for (EventData eventData : batch.getEvents()) {
                eventData.commitProducerDataFromSysProperties();
            }
            // Fix: was 'increaseSequenceNumber', which does not match the 'incrementSequenceNumber'
            // name used everywhere else in this file (and by PartitionPublishingUtils).
            publishingState.incrementSequenceNumber(batch.getCount());
        })).doFinally(signalType -> {
            // Always release, whether the send completed, errored, or was cancelled.
            publishingState.getSemaphore().release();
        });
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
publishingState.getSemaphore().acquireUninterruptibly();
/**
 * Sends the batch to the associated Event Hub.
 *
 * <p>For an idempotent producer, the events are stamped with the partition's publishing state
 * (producer group id, owner level, published sequence number) under a per-partition semaphore, and the
 * client-side state is committed only after the send succeeds.</p>
 *
 * @param batch The batch to send to the service.
 * @return A {@link Mono} that completes when the batch is pushed to the service.
 * @throws NullPointerException if {@code batch} is {@code null}.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // An empty batch is a warned no-op, not an error.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }
    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(),
            batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
            batch.getCount());
    }
    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Only allocated — and only dereferenced — when tracing is enabled.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();
    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span for the whole batch.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // Idempotent publishing defers message creation until sequence numbers are stamped
            // (inside the Mono.fromRunnable below).
            messages.add(createMessageFromEvent(event, partitionKey));
        }
    }
    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }
    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId());
        // NOTE(review): 'messages' is mutated by the runnable below; re-subscribing the returned Mono
        // would append duplicates — confirm callers subscribe at most once.
        return Mono.fromRunnable(() -> {
            // Acquired at subscription time, not assembly time; released in doFinally below.
            publishingState.getSemaphore().acquireUninterruptibly();
            // NOTE(review): getSequenceNumber() appears nullable before link state arrives (see the
            // null check in setPartitionPublishingState) — confirm this unboxing cannot NPE.
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                messages.add(createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(link -> messages.size() == 1
                    ? link.send(messages.get(0))
                    : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy)
        ).publishOn(scheduler).doOnEach(signal -> {
            if (isTracingEnabled) {
                tracerProvider.endSpan(parentContext.get(), signal);
            }
        }).thenEmpty(Mono.fromRunnable(() -> {
            // Reached only on successful completion: commit the published sequence numbers to the
            // events and advance the client-side partition state.
            batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
            for (EventData eventData : batch.getEvents()) {
                eventData.commitProducerDataFromSysProperties();
            }
            publishingState.incrementSequenceNumber(batch.getCount());
        })).doFinally(signalType -> {
            // Always release, whether the send completed, errored, or was cancelled.
            publishingState.getSemaphore().release();
        });
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
/**
 * An <b>asynchronous</b> producer responsible for transmitting {@link EventData} to a specific Event Hub. Depending
 * on the options set when sending, events may go to a specific partition, be routed by a partition key, or be load
 * balanced among available partitions by the service. May optionally operate as an idempotent producer, in which case
 * per-partition publishing state (producer group id, owner level, sequence number) is maintained client-side.
 */
class EventHubProducerAsyncClient implements Closeable {
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";
    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();

    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    // Guards against double-dispose in close().
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnectionProcessor connectionProcessor;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Scheduler scheduler;
    private final boolean isSharedConnection;
    private final Runnable onClientClose;
    private final boolean isIdempotentPartitionPublishing;
    // Per-partition publishing state; non-null only when idempotent publishing is enabled.
    // NOTE(review): a plain HashMap accessed with check-then-lock in getClientPartitionPublishingState —
    // the initial containsKey happens outside the lock; verify all mutation paths go through that method.
    private final Map<String, PartitionPublishingState> partitionPublishingStates;

    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition
     * when {@link CreateBatchOptions#getPartitionId()} is not null or can load balance the messages amongst available
     * partitions.
     */
    EventHubProducerAsyncClient(
        String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose,
        boolean isIdempotentPartitionPublishing,
        Map<String, PartitionPublishingState> initialPartitionPublishingStates
    ) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.scheduler = scheduler;
        this.isSharedConnection = isSharedConnection;
        this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing;
        if (isIdempotentPartitionPublishing) {
            // Seed the publishing state from caller-supplied initial state, or start empty.
            // NOTE(review): the supplied map is used directly, not copied — the caller shares ownership.
            if (initialPartitionPublishingStates == null) {
                this.partitionPublishingStates = new HashMap<>();
            } else {
                this.partitionPublishingStates = initialPartitionPublishingStates;
            }
        } else {
            this.partitionPublishingStates = null;
        }
    }

    /**
     * Convenience constructor for a non-idempotent producer; delegates to the main constructor with idempotent
     * publishing disabled and no initial partition state.
     */
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName,
        EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler,
        boolean isSharedConnection, Runnable onClientClose
    ) {
        this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null);
    }

    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }

    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(EventHubManagementNode::getEventHubProperties);
    }

    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }

    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the
     * available events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     * @return The set of information for the requested partition under the Event Hub this client is associated with.
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(node -> node.getPartitionProperties(partitionId));
    }

    /**
     * Get the idempotent producer's publishing state of a partition as service-facing properties.
     *
     * <p>If the local state was already populated from a link, it is returned directly; otherwise a send link is
     * opened (with retry) so the state gets populated from the link's remote properties first.</p>
     *
     * @param partitionId The partition id of the publishing state.
     * @return A mono that has the {@link PartitionPublishingProperties}.
     * @throws UnsupportedOperationException if this producer isn't an idempotent producer.
     */
    Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            // defer() so the latest state is read at subscription time, not at assembly time.
            return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties()));
        } else {
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy).map(
                    PartitionPublishingState::toPartitionPublishingProperties);
        }
    }

    /**
     * Get the idempotent producer's publishing state of a partition.
     *
     * <p>Same strategy as {@link #getPartitionPublishingProperties(String)}: return the local state when it was
     * populated from a link, otherwise open a send link (with retry) to populate it first.</p>
     *
     * @param partitionId The partition id of the publishing state.
     * @return A mono that has the {@link PartitionPublishingState}.
     * @throws UnsupportedOperationException if this producer isn't an idempotent producer.
     */
    Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            return Mono.defer(() -> Mono.just(publishingState));
        } else {
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy);
        }
    }

    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }

    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        // Idempotent publishing is per-partition, so a batch without an explicit partition id is rejected.
        if (this.isIdempotentPartitionPublishing) {
            if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
                return monoError(logger, new IllegalArgumentException(
                    "An idempotent producer can not create an EventDataBatch without partition id"));
            }
        }
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();

        // Partition key and partition id are mutually exclusive routing options.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. partitionId: '%s'",
                partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }

        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // Fall back to the default max message length when the link does not report a size.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }
                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;
                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                }));
    }

    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.</p>
     *
     * @param event Event to send to the service.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }
        return send(Flux.just(event));
    }

    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event
     * exceeds the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.</p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.just(event), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * <p>For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.</p>
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(Flux.fromIterable(events));
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * <p>For more information regarding the maximum event size allowed, see
     * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
     * Limits</a>.</p>
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.fromIterable(events), options);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(events, DEFAULT_SEND_OPTIONS);
    }

    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch. Must carry a partition id when this
     *     producer is idempotent.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
            return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
                + "because this producer client is an idempotent producer"));
        }
        return sendInternal(events, options).publishOn(scheduler);
    }

    /**
     * Serializes an event into an AMQP {@link Message}, attaching the partition key as a message annotation when one
     * is supplied.
     *
     * @param event The event to serialize.
     * @param partitionKey Optional partition key; ignored when null or empty.
     * @return The serialized AMQP message.
     */
    private Message createMessageFromEvent(EventData event, String partitionKey) {
        final Message message = messageSerializer.serialize(event);
        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        return message;
    }

    /**
     * Validates the send options, sizes batches to the link, collects the incoming events into batches, and sends
     * them.
     */
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();

        // Partition key and partition id are mutually exclusive routing options.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. partitionId: '%s'", partitionKey, partitionId)));
        }

        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // maxNumberOfBatches = 1: all events must fit in a single batch or the collector throws.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }

    /**
     * Sends each collected batch and logs (without swallowing) any error from the pipeline.
     */
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }

    /**
     * Builds the AMQP entity path: the Event Hub itself for round-robin sends, or
     * {@code {eventHub}/Partitions/{partitionId}} for a partition-specific sender.
     */
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }

    /**
     * For an idempotent producer, copies the producer id / epoch / sequence number from the link's remote properties
     * into the client-side partition state; a non-idempotent producer passes the link through untouched.
     */
    private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
        if (this.isIdempotentPartitionPublishing) {
            return amqpSendLink.getRemoteProperties().map(properties -> {
                this.setPartitionPublishingState(
                    partitionId,
                    (Long) properties.get(SymbolConstants.PRODUCER_ID),
                    (Short) properties.get(SymbolConstants.PRODUCER_EPOCH),
                    (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER)
                );
                return amqpSendLink;
            });
        } else {
            return Mono.just(amqpSendLink);
        }
    }

    /**
     * Get the idempotent producer's publishing state of a partition from the client side maintained state.
     * It doesn't create a link to get state from the service; a fresh empty state is created on first access.
     *
     * @throws IllegalStateException if this producer is not an idempotent producer.
     */
    private PartitionPublishingState getClientPartitionPublishingState(String partitionId) {
        if (!this.isIdempotentPartitionPublishing) {
            throw logger.logExceptionAsWarning(
                new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer"
                    + " is not an idempotent producer."));
        }
        if (this.partitionPublishingStates.containsKey(partitionId)) {
            return this.partitionPublishingStates.get(partitionId);
        } else {
            synchronized (this) {
                // Re-check under the lock so only one state is ever created per partition.
                if (this.partitionPublishingStates.containsKey(partitionId)) {
                    return this.partitionPublishingStates.get(partitionId);
                }
                PartitionPublishingState state = new PartitionPublishingState();
                this.partitionPublishingStates.put(partitionId, state);
                return state;
            }
        }
    }

    /**
     * Updates the partition's publishing state from link-provided values, but only when the local sequence number is
     * unset or not ahead of the link's value (never move the sequence number backwards).
     *
     * NOTE(review): {@code sequenceNumber} is unboxed in the comparison; if the link omits
     * PRODUCER_SEQUENCE_NUMBER this would NPE — confirm the service always supplies it on idempotent links.
     */
    private void setPartitionPublishingState(
        String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState != null
            && (publishingState.getSequenceNumber() == null
                || publishingState.getSequenceNumber() <= sequenceNumber)) {
            publishingState.setOwnerLevel(ownerLevel);
            publishingState.setProducerGroupId(producerGroupId);
            publishingState.setSequenceNumber(sequenceNumber);
            // Mark the state as authoritative (populated from the link rather than locally created).
            publishingState.setFromLink(true);
        }
    }

    /**
     * Obtains a send link for the given partition (or the round-robin gateway when {@code partitionId} is empty).
     * Idempotent producers create the link with their current publishing state and refresh that state from the
     * link's remote properties afterwards.
     */
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final String linkName = getEntityPath(partitionId);
        return connectionProcessor
            .flatMap(connection -> isIdempotentPartitionPublishing
                ? connection.createSendLink(
                    linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId))
                : connection.createSendLink(
                    linkName, entityPath, retryOptions))
            .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink));
    }

    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed; for a shared connection only the client-close callback runs. Idempotent: subsequent
     * calls are no-ops.
     */
    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }
        if (isSharedConnection) {
            onClientClose.run();
        } else {
            connectionProcessor.dispose();
        }
    }

    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null}
     * then it'll collect as many batches as possible. Otherwise, if there are more events than can fit into
     * {@code maxNumberOfBatches}, then the collector throws an {@link AmqpException} with
     * {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED}.
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        // null means "unbounded": keep starting new batches as events overflow.
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final String entityPath;
        private final String hostname;
        private final boolean isCreatedByIdempotentProducer;

        // The batch currently being filled; replaced when full, nulled by the finisher.
        private volatile EventDataBatch currentBatch;

        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath,
            String hostname, boolean isCreatedByIdempotentProducer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes()
                : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.entityPath = entityPath;
            this.hostname = hostname;
            this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;

            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
        }

        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }

        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }
                // Current batch is full; fail if the batch budget is exhausted, else roll over to a new batch.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
                // NOTE(review): the result of tryAdd on a brand-new batch is ignored — an event too large
                // for any batch would be silently dropped here; verify EventDataBatch rejects oversized events.
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }

        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }

        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch into the result.
                EventDataBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Ehm, the constructor adds `this` to the map.
public static Region create(String name, String label) { Objects.requireNonNull(name, "'name' cannot be null."); Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT)); if (region != null) { return region; } else { return new Region(name, label); } }
return new Region(name, label);
public static Region create(String name, String label) { Objects.requireNonNull(name, "'name' cannot be null."); Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT)); if (region != null) { return region; } else { return new Region(name, label); } }
class Region { private static final ConcurrentMap<String, Region> VALUES_BY_NAME = new ConcurrentHashMap<>(); /* * Azure Cloud - Americas */ /** * East US (US) (recommended) */ public static final Region US_EAST = new Region("eastus", "East US"); /** * East US 2 (US) (recommended) */ public static final Region US_EAST2 = new Region("eastus2", "East US 2"); /** * South Central US (US) (recommended) */ public static final Region US_SOUTH_CENTRAL = new Region("southcentralus", "South Central US"); /** * West US 2 (US) (recommended) */ public static final Region US_WEST2 = new Region("westus2", "West US 2"); /** * Central US (US) (recommended) */ public static final Region US_CENTRAL = new Region("centralus", "Central US"); /** * North Central US (US) (recommended) */ public static final Region US_NORTH_CENTRAL = new Region("northcentralus", "North Central US"); /** * West US (US) (recommended) */ public static final Region US_WEST = new Region("westus", "West US"); /** * West Central US (US) */ public static final Region US_WEST_CENTRAL = new Region("westcentralus", "West Central US"); /** * Canada Central (Canada) (recommended) */ public static final Region CANADA_CENTRAL = new Region("canadacentral", "Canada Central"); /** * Canada East (Canada) */ public static final Region CANADA_EAST = new Region("canadaeast", "Canada East"); /** * Brazil South (South America) (recommended) */ public static final Region BRAZIL_SOUTH = new Region("brazilsouth", "Brazil South"); /** * Brazil Southeast (South America) */ public static final Region BRAZIL_SOUTHEAST = new Region("brazilsoutheast", "Brazil Southeast"); /* * Azure Cloud - Europe */ /** * North Europe (Europe) (recommended) */ public static final Region EUROPE_NORTH = new Region("northeurope", "North Europe"); /** * UK South (Europe) (recommended) */ public static final Region UK_SOUTH = new Region("uksouth", "UK South"); /** * West Europe (Europe) (recommended) */ public static final Region EUROPE_WEST = new 
Region("westeurope", "West Europe"); /** * France Central (Europe) (recommended) */ public static final Region FRANCE_CENTRAL = new Region("francecentral", "France Central"); /** * Germany West Central (Europe) (recommended) */ public static final Region GERMANY_WEST_CENTRAL = new Region("germanywestcentral", "Germany West Central"); /** * Norway East (Europe) (recommended) */ public static final Region NORWAY_EAST = new Region("norwayeast", "Norway East"); /** * Switzerland North (Europe) (recommended) */ public static final Region SWITZERLAND_NORTH = new Region("switzerlandnorth", "Switzerland North"); /** * France South (Europe) */ public static final Region FRANCE_SOUTH = new Region("francesouth", "France South"); /** * Germany North (Europe) */ public static final Region GERMANY_NORTH = new Region("germanynorth", "Germany North"); /** * Norway West (Europe) */ public static final Region NORWAY_WEST = new Region("norwaywest", "Norway West"); /** * Switzerland West (Europe) */ public static final Region SWITZERLAND_WEST = new Region("switzerlandwest", "Switzerland West"); /** * UK West (Europe) */ public static final Region UK_WEST = new Region("ukwest", "UK West"); /* * Azure Cloud - Asia */ /** * Australia East (Asia Pacific) (recommended) */ public static final Region AUSTRALIA_EAST = new Region("australiaeast", "Australia East"); /** * Southeast Asia (Asia Pacific) (recommended) */ public static final Region ASIA_SOUTHEAST = new Region("southeastasia", "Southeast Asia"); /** * Central India (Asia Pacific) (recommended) */ public static final Region INDIA_CENTRAL = new Region("centralindia", "Central India"); /** * East Asia (Asia Pacific) (recommended) */ public static final Region ASIA_EAST = new Region("eastasia", "East Asia"); /** * Japan East (Asia Pacific) (recommended) */ public static final Region JAPAN_EAST = new Region("japaneast", "Japan East"); /** * Korea Central (Asia Pacific) (recommended) */ public static final Region KOREA_CENTRAL = new 
Region("koreacentral", "Korea Central"); /** * Australia Central (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL = new Region("australiacentral", "Australia Central"); /** * Australia Central 2 (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL2 = new Region("australiacentral2", "Australia Central 2"); /** * Australia Southeast (Asia Pacific) */ public static final Region AUSTRALIA_SOUTHEAST = new Region("australiasoutheast", "Australia Southeast"); /** * Japan West (Asia Pacific) */ public static final Region JAPAN_WEST = new Region("japanwest", "Japan West"); /** * Korea South (Asia Pacific) */ public static final Region KOREA_SOUTH = new Region("koreasouth", "Korea South"); /** * South India (Asia Pacific) */ public static final Region INDIA_SOUTH = new Region("southindia", "South India"); /** * West India (Asia Pacific) */ public static final Region INDIA_WEST = new Region("westindia", "West India"); /* * Azure Cloud - Middle East and Africa */ /** * UAE North (Middle East) (recommended) */ public static final Region UAE_NORTH = new Region("uaenorth", "UAE North"); /** * UAE Central (Middle East) */ public static final Region UAE_CENTRAL = new Region("uaecentral", "UAE Central"); /** * South Africa North (Africa) (recommended) */ public static final Region SOUTHAFRICA_NORTH = new Region("southafricanorth", "South Africa North"); /** * South Africa West (Africa) */ public static final Region SOUTHAFRICA_WEST = new Region("southafricawest", "South Africa West"); /* * Azure China Cloud */ /** * China North */ public static final Region CHINA_NORTH = new Region("chinanorth", "China North"); /** * China East */ public static final Region CHINA_EAST = new Region("chinaeast", "China East"); /** * China North 2 */ public static final Region CHINA_NORTH2 = new Region("chinanorth2", "China North 2"); /** * China East 2 */ public static final Region CHINA_EAST2 = new Region("chinaeast2", "China East 2"); /* * Azure German Cloud */ /** * Germany 
Central */ public static final Region GERMANY_CENTRAL = new Region("germanycentral", "Germany Central"); /** * Germany Northeast */ public static final Region GERMANY_NORTHEAST = new Region("germanynortheast", "Germany Northeast"); /* * Azure Government Cloud */ /** * U.S. government cloud in Virginia. */ public static final Region GOV_US_VIRGINIA = new Region("usgovvirginia", "US Gov Virginia"); /** * U.S. government cloud in Iowa. */ public static final Region GOV_US_IOWA = new Region("usgoviowa", "US Gov Iowa"); /** * U.S. government cloud in Arizona. */ public static final Region GOV_US_ARIZONA = new Region("usgovarizona", "US Gov Arizona"); /** * U.S. government cloud in Texas. */ public static final Region GOV_US_TEXAS = new Region("usgovtexas", "US Gov Texas"); /** * U.S. Department of Defense cloud - East. */ public static final Region GOV_US_DOD_EAST = new Region("usdodeast", "US DoD East"); /** * U.S. Department of Defense cloud - Central. */ public static final Region GOV_US_DOD_CENTRAL = new Region("usdodcentral", "US DoD Central"); private final String name; private final String label; /** * @return predefined Azure regions */ public static Region[] values() { Collection<Region> valuesCollection = VALUES_BY_NAME.values(); return valuesCollection.toArray(new Region[valuesCollection.size()]); } private Region(String name, String label) { this.name = name; this.label = label; VALUES_BY_NAME.put(name.toLowerCase(Locale.ROOT), this); } /** * Creates a region from a name and a label. 
* * @param name the uniquely identifiable name of the region * @param label the label of the region * @return the newly created region */ @JsonValue @Override public String toString() { return name(); } /** * @return the name of the region */ public String name() { return this.name; } /** * @return the label of the region */ public String label() { return this.label; } /** * Parses a name into a Region object and creates a new Region instance if not found among the existing ones. * * @param name the name of the region * @return the parsed or created region */ public static Region fromName(String name) { if (name == null) { return null; } Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT).replace(" ", "")); if (region != null) { return region; } else { return Region.create(name.toLowerCase(Locale.ROOT).replace(" ", ""), name); } } @Override public int hashCode() { return this.name.hashCode(); } @Override public boolean equals(Object obj) { if (!(obj instanceof Region)) { return false; } else if (obj == this) { return true; } else { Region rhs = (Region) obj; return this.name.equalsIgnoreCase(rhs.name); } } }
class Region { private static final ConcurrentMap<String, Region> VALUES_BY_NAME = new ConcurrentHashMap<>(); /* * Azure Cloud - Americas */ /** * East US (US) (recommended) */ public static final Region US_EAST = new Region("eastus", "East US"); /** * East US 2 (US) (recommended) */ public static final Region US_EAST2 = new Region("eastus2", "East US 2"); /** * South Central US (US) (recommended) */ public static final Region US_SOUTH_CENTRAL = new Region("southcentralus", "South Central US"); /** * West US 2 (US) (recommended) */ public static final Region US_WEST2 = new Region("westus2", "West US 2"); /** * Central US (US) (recommended) */ public static final Region US_CENTRAL = new Region("centralus", "Central US"); /** * North Central US (US) (recommended) */ public static final Region US_NORTH_CENTRAL = new Region("northcentralus", "North Central US"); /** * West US (US) (recommended) */ public static final Region US_WEST = new Region("westus", "West US"); /** * West Central US (US) */ public static final Region US_WEST_CENTRAL = new Region("westcentralus", "West Central US"); /** * Canada Central (Canada) (recommended) */ public static final Region CANADA_CENTRAL = new Region("canadacentral", "Canada Central"); /** * Canada East (Canada) */ public static final Region CANADA_EAST = new Region("canadaeast", "Canada East"); /** * Brazil South (South America) (recommended) */ public static final Region BRAZIL_SOUTH = new Region("brazilsouth", "Brazil South"); /** * Brazil Southeast (South America) */ public static final Region BRAZIL_SOUTHEAST = new Region("brazilsoutheast", "Brazil Southeast"); /* * Azure Cloud - Europe */ /** * North Europe (Europe) (recommended) */ public static final Region EUROPE_NORTH = new Region("northeurope", "North Europe"); /** * UK South (Europe) (recommended) */ public static final Region UK_SOUTH = new Region("uksouth", "UK South"); /** * West Europe (Europe) (recommended) */ public static final Region EUROPE_WEST = new 
Region("westeurope", "West Europe"); /** * France Central (Europe) (recommended) */ public static final Region FRANCE_CENTRAL = new Region("francecentral", "France Central"); /** * Germany West Central (Europe) (recommended) */ public static final Region GERMANY_WEST_CENTRAL = new Region("germanywestcentral", "Germany West Central"); /** * Norway East (Europe) (recommended) */ public static final Region NORWAY_EAST = new Region("norwayeast", "Norway East"); /** * Switzerland North (Europe) (recommended) */ public static final Region SWITZERLAND_NORTH = new Region("switzerlandnorth", "Switzerland North"); /** * France South (Europe) */ public static final Region FRANCE_SOUTH = new Region("francesouth", "France South"); /** * Germany North (Europe) */ public static final Region GERMANY_NORTH = new Region("germanynorth", "Germany North"); /** * Norway West (Europe) */ public static final Region NORWAY_WEST = new Region("norwaywest", "Norway West"); /** * Switzerland West (Europe) */ public static final Region SWITZERLAND_WEST = new Region("switzerlandwest", "Switzerland West"); /** * UK West (Europe) */ public static final Region UK_WEST = new Region("ukwest", "UK West"); /* * Azure Cloud - Asia */ /** * Australia East (Asia Pacific) (recommended) */ public static final Region AUSTRALIA_EAST = new Region("australiaeast", "Australia East"); /** * Southeast Asia (Asia Pacific) (recommended) */ public static final Region ASIA_SOUTHEAST = new Region("southeastasia", "Southeast Asia"); /** * Central India (Asia Pacific) (recommended) */ public static final Region INDIA_CENTRAL = new Region("centralindia", "Central India"); /** * East Asia (Asia Pacific) (recommended) */ public static final Region ASIA_EAST = new Region("eastasia", "East Asia"); /** * Japan East (Asia Pacific) (recommended) */ public static final Region JAPAN_EAST = new Region("japaneast", "Japan East"); /** * Korea Central (Asia Pacific) (recommended) */ public static final Region KOREA_CENTRAL = new 
Region("koreacentral", "Korea Central"); /** * Australia Central (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL = new Region("australiacentral", "Australia Central"); /** * Australia Central 2 (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL2 = new Region("australiacentral2", "Australia Central 2"); /** * Australia Southeast (Asia Pacific) */ public static final Region AUSTRALIA_SOUTHEAST = new Region("australiasoutheast", "Australia Southeast"); /** * Japan West (Asia Pacific) */ public static final Region JAPAN_WEST = new Region("japanwest", "Japan West"); /** * Korea South (Asia Pacific) */ public static final Region KOREA_SOUTH = new Region("koreasouth", "Korea South"); /** * South India (Asia Pacific) */ public static final Region INDIA_SOUTH = new Region("southindia", "South India"); /** * West India (Asia Pacific) */ public static final Region INDIA_WEST = new Region("westindia", "West India"); /* * Azure Cloud - Middle East and Africa */ /** * UAE North (Middle East) (recommended) */ public static final Region UAE_NORTH = new Region("uaenorth", "UAE North"); /** * UAE Central (Middle East) */ public static final Region UAE_CENTRAL = new Region("uaecentral", "UAE Central"); /** * South Africa North (Africa) (recommended) */ public static final Region SOUTHAFRICA_NORTH = new Region("southafricanorth", "South Africa North"); /** * South Africa West (Africa) */ public static final Region SOUTHAFRICA_WEST = new Region("southafricawest", "South Africa West"); /* * Azure China Cloud */ /** * China North */ public static final Region CHINA_NORTH = new Region("chinanorth", "China North"); /** * China East */ public static final Region CHINA_EAST = new Region("chinaeast", "China East"); /** * China North 2 */ public static final Region CHINA_NORTH2 = new Region("chinanorth2", "China North 2"); /** * China East 2 */ public static final Region CHINA_EAST2 = new Region("chinaeast2", "China East 2"); /* * Azure German Cloud */ /** * Germany 
Central */ public static final Region GERMANY_CENTRAL = new Region("germanycentral", "Germany Central"); /** * Germany Northeast */ public static final Region GERMANY_NORTHEAST = new Region("germanynortheast", "Germany Northeast"); /* * Azure Government Cloud */ /** * U.S. government cloud in Virginia. */ public static final Region GOV_US_VIRGINIA = new Region("usgovvirginia", "US Gov Virginia"); /** * U.S. government cloud in Iowa. */ public static final Region GOV_US_IOWA = new Region("usgoviowa", "US Gov Iowa"); /** * U.S. government cloud in Arizona. */ public static final Region GOV_US_ARIZONA = new Region("usgovarizona", "US Gov Arizona"); /** * U.S. government cloud in Texas. */ public static final Region GOV_US_TEXAS = new Region("usgovtexas", "US Gov Texas"); /** * U.S. Department of Defense cloud - East. */ public static final Region GOV_US_DOD_EAST = new Region("usdodeast", "US DoD East"); /** * U.S. Department of Defense cloud - Central. */ public static final Region GOV_US_DOD_CENTRAL = new Region("usdodcentral", "US DoD Central"); private final String name; private final String label; /** * @return predefined Azure regions. */ public static Collection<Region> values() { return VALUES_BY_NAME.values(); } private Region(String name, String label) { this.name = name; this.label = label; VALUES_BY_NAME.put(name.toLowerCase(Locale.ROOT), this); } /** * Creates a region from a name and a label. * * @param name the uniquely identifiable name of the region. * @param label the label of the region. * @return the newly created region. */ @JsonValue @Override public String toString() { return name(); } /** * @return the name of the region. */ public String name() { return this.name; } /** * @return the label of the region. */ public String label() { return this.label; } /** * Parses a name into a Region object and creates a new Region instance if not found among the existing ones. * * @param name the name of the region. * @return the parsed or created region. 
*/ public static Region fromName(String name) { if (name == null) { return null; } Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT).replace(" ", "")); if (region != null) { return region; } else { return Region.create(name.toLowerCase(Locale.ROOT).replace(" ", ""), name); } } @Override public int hashCode() { return this.name.hashCode(); } @Override public boolean equals(Object obj) { if (!(obj instanceof Region)) { return false; } else if (obj == this) { return true; } else { Region rhs = (Region) obj; return this.name.equalsIgnoreCase(rhs.name); } } }
Changed to synchronize on `partitionPublishingStates`.
private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } }
synchronized (this) {
private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } }
class EventHubProducerAsyncClient implements Closeable {
    // Maximum partition-key length the service accepts; enforced in createBatch(CreateBatchOptions).
    private static final int MAX_PARTITION_KEY_LENGTH = 128;
    // Format for a partition-scoped send entity path: "<eventHubName>/Partitions/<partitionId>".
    private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s";
    private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions();
    private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions();
    private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class);
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final String fullyQualifiedNamespace;
    private final String eventHubName;
    private final EventHubConnectionProcessor connectionProcessor;
    private final AmqpRetryOptions retryOptions;
    private final AmqpRetryPolicy retryPolicy;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Scheduler scheduler;
    private final boolean isSharedConnection;
    private final Runnable onClientClose;
    private final boolean isIdempotentPartitionPublishing;
    // Per-partition publishing state for idempotent producers; null when
    // isIdempotentPartitionPublishing is false (see constructor).
    private final Map<String, PartitionPublishingState> partitionPublishingStates;
    /**
     * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition
     * when {@link CreateBatchOptions
     * load balance the messages amongst available partitions.
     */
    EventHubProducerAsyncClient(
        String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor,
        AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer,
        Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose,
        boolean isIdempotentPartitionPublishing,
        Map<String, PartitionPublishingState> initialPartitionPublishingStates ) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.retryPolicy = getRetryPolicy(retryOptions);
        this.scheduler = scheduler;
        this.isSharedConnection = isSharedConnection;
        this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing;
        // Publishing state is tracked only for idempotent producers; the map can be seeded by
        // the caller, otherwise starts empty. Non-idempotent producers carry no state (null).
        if (isIdempotentPartitionPublishing) {
            if (initialPartitionPublishingStates == null) {
                this.partitionPublishingStates = new HashMap<>();
            } else {
                this.partitionPublishingStates = initialPartitionPublishingStates;
            }
        } else {
            this.partitionPublishingStates = null;
        }
    }
    // Convenience constructor for a non-idempotent producer (no publishing state to seed).
    EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName,
        EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler,
        boolean isSharedConnection, Runnable onClientClose ) {
        this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions,
            tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null);
    }
    /**
     * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Event Hubs namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }
    /**
     * Gets the Event Hub name this client interacts with.
     *
     * @return The Event Hub name this client interacts with.
     */
    public String getEventHubName() {
        return eventHubName;
    }
    /**
     * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
     *
     * @return The set of information for the Event Hub that this client is associated with.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<EventHubProperties> getEventHubProperties() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(EventHubManagementNode::getEventHubProperties);
    }
    /**
     * Retrieves the identifiers for the partitions of an Event Hub.
     *
     * @return A Flux of identifiers for the partitions of an Event Hub.
     */
    public Flux<String> getPartitionIds() {
        return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds()));
    }
    /**
     * Retrieves information about a specific partition for an Event Hub, including elements that describe the available
     * events in the partition event stream.
     *
     * @param partitionId The unique identifier of a partition associated with the Event Hub.
     * @return The set of information for the requested partition under the Event Hub this client is associated with.
     * @throws NullPointerException if {@code partitionId} is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PartitionProperties> getPartitionProperties(String partitionId) {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode())
            .flatMap(node -> node.getPartitionProperties(partitionId));
    }
    /**
     * Get the idempotent producer's publishing state of a partition.
     * @param partitionId The partition id of the publishing state
     * @return A mono that has the {@link PartitionPublishingProperties}.
     * {@code null} if the partition doesn't have any state yet.
     * @throws IllegalStateException if this producer isn't an idempotent producer.
     */
    Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            // State was already hydrated from an open send link; serve the cached copy.
            return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties()));
        } else {
            // Not hydrated yet: opening the send link populates the state (see
            // updatePublishingState), retried per the client's retry policy.
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy).map(
                PartitionPublishingState::toPartitionPublishingProperties);
        }
    }
    /**
     * Get the idempotent producer's publishing state of a partition.
     * @param partitionId The partition id of the publishing state
     * @return A mono that has the {@link PartitionPublishingState}.
     * {@code null} if the partition doesn't have any state yet.
     * @throws IllegalStateException if this producer isn't an idempotent producer.
     */
    Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        if (publishingState.isFromLink()) {
            return Mono.defer(() -> Mono.just(publishingState));
        } else {
            // Opening the send link hydrates the state from the service; retried on failure.
            return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink ->
                Mono.just(this.getClientPartitionPublishingState(partitionId))),
                retryOptions.getTryTimeout(), retryPolicy);
        }
    }
    /**
     * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
     *
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     */
    public Mono<EventDataBatch> createBatch() {
        return createBatch(DEFAULT_BATCH_OPTIONS);
    }
    /**
     * Creates an {@link EventDataBatch} configured with the options specified.
     *
     * @param options A set of options used to configure the {@link EventDataBatch}.
     * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
     * @throws NullPointerException if {@code options} is null.
     */
    public Mono<EventDataBatch> createBatch(CreateBatchOptions options) {
        if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        // Idempotent publishing tracks sequence numbers per partition, so a concrete
        // partition id is mandatory for the batch.
        if (this.isIdempotentPartitionPublishing) {
            if (CoreUtils.isNullOrEmpty(options.getPartitionId())) {
                return monoError(logger, new IllegalArgumentException(
                    "An idempotent producer can not create an EventDataBatch without partition id"));
            }
        }
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        final int batchMaxSize = options.getMaximumSizeInBytes();
        // Partition key and partition id are mutually exclusive routing options.
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. "
                    + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId)));
        } else if (!CoreUtils.isNullOrEmpty(partitionKey)
            && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey,
                MAX_PARTITION_KEY_LENGTH)));
        }
        return getSendLink(partitionId)
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    // Fall back to the protocol maximum when the link does not report a size.
                    final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    if (batchMaxSize > maximumLinkSize) {
                        return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                            "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).",
                            batchMaxSize, maximumLinkSize)));
                    }
                    final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize;
                    return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey,
                        link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                }));
    }
    /**
     * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
     * allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https:
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        }
        return send(Flux.just(event));
    }
    /**
     * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
     * the maximum size allowed, an exception will be triggered and the send will fail.
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https:
     * Limits</a>.
     * </p>
     *
     * @param event Event to send to the service.
     * @param options The set of options to consider when sending this event.
     * @return A {@link Mono} that completes when the event is pushed to the service.
     */
    Mono<Void> send(EventData event, SendOptions options) {
        if (event == null) {
            return monoError(logger, new NullPointerException("'event' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.just(event), options);
    }
    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https:
     * Limits</a>.
     * </p>
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(Flux.fromIterable(events));
    }
    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send
     *
     * <p>
     * For more information regarding the maximum event size allowed, see
     * <a href="https:
     * Limits</a>.
     * </p>
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
     */
    public Mono<Void> send(Iterable<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        }
        return send(Flux.fromIterable(events), options);
    }
    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        }
        return send(events, DEFAULT_SEND_OPTIONS);
    }
    /**
     * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
     * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
     * size is the max amount allowed on the link.
     *
     * @param events Events to send to the service.
     * @param options The set of options to consider when sending this batch.
     * @return A {@link Mono} that completes when all events are pushed to the service.
     */
    Mono<Void> send(Flux<EventData> events, SendOptions options) {
        if (events == null) {
            return monoError(logger, new NullPointerException("'events' cannot be null."));
        } else if (options == null) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) {
            // Idempotent publishing is per-partition, so a target partition id is required.
            return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` "
                + "because this producer client is an idempotent producer"));
        }
        return sendInternal(events, options).publishOn(scheduler);
    }
    /**
     * Sends the batch to the associated Event Hub.
     *
     * @param batch The batch to send to the service.
     * @return A {@link Mono} that completes when the batch is pushed to the service.
     * @throws NullPointerException if {@code batch} is {@code null}.
     * @see EventHubProducerAsyncClient
     * @see EventHubProducerAsyncClient
     */
    public Mono<Void> send(EventDataBatch batch) {
        if (batch == null) {
            return monoError(logger, new NullPointerException("'batch' cannot be null."));
        } else if (batch.getEvents().isEmpty()) {
            logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
            return Mono.empty();
        }
        if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
            logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(),
                batch.getPartitionId());
        } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
            logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
                batch.getPartitionKey());
        } else {
            logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
                batch.getCount());
        }
        final String partitionKey = batch.getPartitionKey();
        final boolean isTracingEnabled = tracerProvider.isEnabled();
        final AtomicReference<Context> parentContext = isTracingEnabled
            ? new AtomicReference<>(Context.NONE) : null;
        Context sharedContext = null;
        final List<Message> messages = new ArrayList<>();
        for (int i = 0; i < batch.getEvents().size(); i++) {
            final EventData event = batch.getEvents().get(i);
            if (isTracingEnabled) {
                parentContext.set(event.getContext());
                if (i == 0) {
                    sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
                }
                tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
            }
            // For idempotent publishing, AMQP messages are created later — only after sequence
            // numbers have been stamped while holding the partition's publishing semaphore.
            if (!isIdempotentPartitionPublishing) {
                messages.add(this.createMessageFromEvent(event, partitionKey));
            }
        }
        if (isTracingEnabled) {
            final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
            parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
        }
        if (isIdempotentPartitionPublishing) {
            PartitionPublishingState publishingState =
                this.getClientPartitionPublishingState(batch.getPartitionId());
            // The semaphore serializes concurrent sends to the same partition so that published
            // sequence numbers stay contiguous; it is released in doFinally below.
            return Mono.fromRunnable(() -> {
                publishingState.getSemaphore().acquireUninterruptibly();
                int seqNumber = publishingState.getSequenceNumber();
                for (EventData eventData : batch.getEvents()) {
                    eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                    eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                    eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                    seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                    messages.add(this.createMessageFromEvent(eventData, partitionKey));
                }
            }).then(
                withRetry(getSendLink(batch.getPartitionId())
                    .flatMap(
                        link -> messages.size() == 1
                            ? link.send(messages.get(0)) : link.send(messages)),
                    retryOptions.getTryTimeout(), retryPolicy
                )).publishOn(scheduler).doOnEach(signal -> {
                    if (isTracingEnabled) {
                        tracerProvider.endSpan(parentContext.get(), signal);
                    }
                }).thenEmpty(Mono.fromRunnable(() -> {
                    // Send succeeded: commit the stamped system properties back onto the events
                    // and advance the client-side sequence number by the batch size.
                    batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                    for (EventData eventData : batch.getEvents()) {
                        eventData.commitProducerDataFromSysProperties();
                    }
                    publishingState.increaseSequenceNumber(batch.getCount());
                })).doFinally(
                    signalType -> {
                        publishingState.getSemaphore().release();
                    }
                );
        } else {
            return withRetry(getSendLink(batch.getPartitionId())
                .flatMap(link -> messages.size() == 1
                    ? link.send(messages.get(0)) : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy)
                .publishOn(scheduler)
                .doOnEach(signal -> {
                    if (isTracingEnabled) {
                        tracerProvider.endSpan(parentContext.get(), signal);
                    }
                });
        }
    }
    // Serializes an event to an AMQP message; a partition key travels as a message annotation.
    private Message createMessageFromEvent(EventData event, String partitionKey) {
        final Message message = messageSerializer.serialize(event);
        if (!CoreUtils.isNullOrEmpty(partitionKey)) {
            final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null
                ? new MessageAnnotations(new HashMap<>())
                : message.getMessageAnnotations();
            messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey);
            message.setMessageAnnotations(messageAnnotations);
        }
        return message;
    }
    private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) {
        final String partitionKey = options.getPartitionKey();
        final String partitionId = options.getPartitionId();
        if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) {
            return monoError(logger, new IllegalArgumentException(String.format(Locale.US,
                "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the"
                    + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId)));
        }
        return getSendLink(options.getPartitionId())
            .flatMap(link -> link.getLinkSize()
                .flatMap(size -> {
                    final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES;
                    final CreateBatchOptions batchOptions = new CreateBatchOptions()
                        .setPartitionKey(options.getPartitionKey())
                        .setPartitionId(options.getPartitionId())
                        .setMaximumSizeInBytes(batchSize);
                    // Collect the stream into at most one batch (maxNumberOfBatches = 1);
                    // overflow raises an AmqpException inside the collector.
                    return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext,
                        tracerProvider, link.getEntityPath(), link.getHostname(),
                        this.isIdempotentPartitionPublishing));
                })
                .flatMap(list -> sendInternal(Flux.fromIterable(list))));
    }
    private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) {
        return eventBatches
            .flatMap(this::send)
            .then()
            .doOnError(error -> {
                logger.error(Messages.ERROR_SENDING_BATCH, error);
            });
    }
    // The hub itself for service-routed sends, or "<hub>/Partitions/<id>" for a fixed partition.
    private String getEntityPath(String partitionId) {
        return CoreUtils.isNullOrEmpty(partitionId)
            ? eventHubName
            : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId);
    }
    private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) {
        if (this.isIdempotentPartitionPublishing) {
            // Hydrate client-side state from the producer properties the service reported on the link.
            return amqpSendLink.getRemoteProperties().map(properties -> {
                this.setPartitionPublishingState(
                    partitionId,
                    (Long) properties.get(SymbolConstants.PRODUCER_ID),
                    (Short) properties.get(SymbolConstants.PRODUCER_EPOCH),
                    (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER)
                );
                return amqpSendLink;
            });
        } else {
            return Mono.just(amqpSendLink);
        }
    }
    /**
     * Updates the idempotent producer's client-side publishing state of a partition with values
     * reported by the service.
     * It doesn't create a link to get state from the service.
     */
    private void setPartitionPublishingState(
        String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId);
        // Accept the link-reported values only when they do not move the sequence number backwards.
        if (publishingState != null
            && (publishingState.getSequenceNumber() == null
                || publishingState.getSequenceNumber() <= sequenceNumber)) {
            publishingState.setOwnerLevel(ownerLevel);
            publishingState.setProducerGroupId(producerGroupId);
            publishingState.setSequenceNumber(sequenceNumber);
            publishingState.setFromLink(true);
        }
    }
    private Mono<AmqpSendLink> getSendLink(String partitionId) {
        final String entityPath = getEntityPath(partitionId);
        final String linkName = getEntityPath(partitionId);
        // Idempotent producers pass their publishing state when the link is created; afterwards
        // the state is refreshed from the link's remote properties (updatePublishingState).
        return connectionProcessor
            .flatMap(connection -> isIdempotentPartitionPublishing
                ? connection.createSendLink(
                    linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId))
                : connection.createSendLink(
                    linkName, entityPath, retryOptions))
            .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink));
    }
    /**
     * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying
     * connection is also closed.
     */
    @Override
    public void close() {
        // getAndSet guarantees the close work runs at most once.
        if (isDisposed.getAndSet(true)) {
            return;
        }
        if (isSharedConnection) {
            onClientClose.run();
        } else {
            connectionProcessor.dispose();
        }
    }
    /**
     * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then
     * it'll collect as many batches as possible.
     * Otherwise, if there are more events than can fit into {@code
     * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link
     * AmqpErrorCondition
     */
    private static class EventDataCollector
        implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> {
        private final String partitionKey;
        private final String partitionId;
        private final int maxMessageSize;
        private final Integer maxNumberOfBatches;
        private final ErrorContextProvider contextProvider;
        private final TracerProvider tracerProvider;
        private final String entityPath;
        private final String hostname;
        private final boolean isCreatedByIdempotentProducer;
        // The batch currently being filled; volatile because the collector's functions may run
        // on a different thread than the one that constructed it.
        private volatile EventDataBatch currentBatch;
        EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches,
            ErrorContextProvider contextProvider,
            TracerProvider tracerProvider, String entityPath, String hostname,
            boolean isCreatedByIdempotentProducer) {
            this.maxNumberOfBatches = maxNumberOfBatches;
            this.maxMessageSize = options.getMaximumSizeInBytes() > 0
                ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES;
            this.partitionKey = options.getPartitionKey();
            this.partitionId = options.getPartitionId();
            this.contextProvider = contextProvider;
            this.tracerProvider = tracerProvider;
            this.entityPath = entityPath;
            this.hostname = hostname;
            this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer;
            currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
        }
        @Override
        public Supplier<List<EventDataBatch>> supplier() {
            return ArrayList::new;
        }
        @Override
        public BiConsumer<List<EventDataBatch>, EventData> accumulator() {
            return (list, event) -> {
                EventDataBatch batch = currentBatch;
                if (batch.tryAdd(event)) {
                    return;
                }
                // Current batch is full; fail if starting another would exceed the batch cap.
                if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) {
                    final String message = String.format(Locale.US,
                        Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches);
                    throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message,
                        contextProvider.getErrorContext());
                }
                currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider,
                    tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer);
                currentBatch.tryAdd(event);
                list.add(batch);
            };
        }
        @Override
        public BinaryOperator<List<EventDataBatch>> combiner() {
            return (existing, another) -> {
                existing.addAll(another);
                return existing;
            };
        }
        @Override
        public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() {
            return list -> {
                // Flush the in-progress batch into the result.
                EventDataBatch batch = currentBatch;
                currentBatch = null;
                if (batch != null) {
                    list.add(batch);
                }
                return list;
            };
        }
        @Override
        public Set<Characteristics> characteristics() {
            return Collections.emptySet();
        }
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. 
Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Removed.
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } }
if (this.isIdempotentPartitionPublishing) {
private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(this.createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = this.getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(this.createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? 
new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? 
link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } } private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
You're doing this assignment on every iteration of the loop. After the first iteration the field already holds an unmodifiable map, so the next `put` will throw `UnsupportedOperationException`. Move the `Collections.unmodifiableMap(...)` wrapping out of the loop, after all entries have been copied.
/**
 * Sets the initial publishing states that an idempotent producer uses when it starts publishing to
 * partitions.
 *
 * @param states Map from partition id to the last-known {@link PartitionPublishingProperties} for that
 *     partition. May be {@code null}, which clears any previously configured states.
 * @return The updated {@link EventHubClientBuilder} object.
 */
public EventHubClientBuilder initialPartitionPublishingStates(Map<String, PartitionPublishingProperties> states) {
    if (states != null) {
        // Copy into a fresh map so later mutation of the caller's map has no effect.
        // BUG FIX: the Collections.unmodifiableMap wrapping must happen AFTER the loop. The original
        // code re-assigned the field to an unmodifiable view inside the forEach lambda, so the second
        // iteration's put() threw UnsupportedOperationException for any map with more than one entry.
        final Map<String, PartitionPublishingState> copies = new HashMap<>();
        states.forEach((partitionId, state) ->
            copies.put(partitionId, new PartitionPublishingState(state)));
        this.initialPartitionPublishingStates = Collections.unmodifiableMap(copies);
    } else {
        this.initialPartitionPublishingStates = null;
    }

    return this;
}
this.initialPartitionPublishingStates =
/**
 * Sets the initial publishing states that an idempotent producer uses when it starts publishing to
 * partitions.
 *
 * @param states Map from partition id to the {@link PartitionPublishingProperties} to seed that
 *     partition's state with. {@code null} clears any previously configured states.
 * @return The updated {@link EventHubClientBuilder} object.
 */
public EventHubClientBuilder initialPartitionPublishingStates(Map<String, PartitionPublishingProperties> states) {
    // Guard clause: null clears the configured states.
    if (states == null) {
        this.initialPartitionPublishingStates = null;
        return this;
    }

    // Copy each entry into a fresh map, converting the caller-facing properties into internal state.
    this.initialPartitionPublishingStates = new HashMap<>();
    for (Map.Entry<String, PartitionPublishingProperties> entry : states.entrySet()) {
        this.initialPartitionPublishingStates.put(entry.getKey(),
            new PartitionPublishingState(entry.getValue()));
    }

    // Freeze the copy only after every entry has been added.
    this.initialPartitionPublishingStates =
        Collections.unmodifiableMap(this.initialPartitionPublishingStates);

    return this;
}
class EventHubClientBuilder { static final int DEFAULT_PREFETCH_COUNT = 500; static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1; /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; /** * The minimum value allowed for the prefetch count of the consumer. */ private static final int MINIMUM_PREFETCH_COUNT = 1; /** * The maximum value allowed for the prefetch count of the consumer. */ private static final int MAXIMUM_PREFETCH_COUNT = 8000; private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions() .setTryTimeout(ClientConstants.OPERATION_TIMEOUT); private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class); private final Object connectionLock = new Object(); private final AtomicBoolean isSharedConnection = new AtomicBoolean(); private TokenCredential credentials; private Configuration configuration; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport; private String fullyQualifiedNamespace; private String eventHubName; private String consumerGroup; private EventHubConnectionProcessor eventHubConnectionProcessor; private Integer prefetchCount; private boolean isIdempotentPartitionPublishing; private Map<String, PartitionPublishingState> initialPartitionPublishingStates; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. 
*/ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer * created using the builder. */ public EventHubClientBuilder() { transport = AmqpTransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected * that the Event Hub name and the shared access key properties are contained in this connection string. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString) { ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential); } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } else { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event Hub * name. * @param eventHubName The name of the Event Hub to connect the client to. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or, * if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString, String eventHubName) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'connectionString' cannot be an empty string.")); } else if (eventHubName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub name [%s] and it does not match the given " + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. " + "Or supply a 'connectionString' without 'EntityPath' in it.", properties.getEntityPath(), eventHubName))); } return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential); } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Toggles the builder to use the same connection for producers or consumers that are built from this instance. 
By * default, a new connection is constructed and used created for each Event Hub consumer or producer created. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder shareConnection() { this.isSharedConnection.set(true); return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be * similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>. * @param eventHubName The name of the Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the * Event Hubs namespace or the requested Event Hub, depending on Azure configuration. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty * string. * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credentials} is * null. */ public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } else if (CoreUtils.isNullOrEmpty(eventHubName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. 
When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * AmqpTransportType * * @param transport The transport type to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder transportType(AmqpTransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this * group. The name of the consumer group that is created by default is {@link * "$Default"}. * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder consumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; return this; } /** * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive * and queue locally without regard to whether a receive operation is currently active. * * @param prefetchCount The amount of events to queue locally. * * @return The updated {@link EventHubClientBuilder} object. 
* @throws IllegalArgumentException if {@code prefetchCount} is less than {@link * greater than {@link */ public EventHubClientBuilder prefetchCount(int prefetchCount) { if (prefetchCount < MINIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT))); } if (prefetchCount > MAXIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT))); } this.prefetchCount = prefetchCount; return this; } /** * Enables idempotent publishing when an {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * is built. * * If enabled, the producer will only be able to publish directly to partitions; it will not be able to publish to * the Event Hubs gateway for automatic partition routing nor using a partition key. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder enableIdempotentPartitionPublishing() { this.isIdempotentPartitionPublishing = true; return this; } /** * Sets the idempotent publishing options to {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * when you build them. * * The set of options that can be specified to influence publishing behavior specific to the configured Event Hub * partition. * These options are not necessary in the majority of scenarios and are intended for use with specialized scenarios, * such as when recovering the state used for idempotent publishing. * * It is highly recommended that these options only be specified if there is a proven need to do so; Incorrectly * configuring these values may result in the built {@link EventHubProducerAsyncClient} or * {@link EventHubProducerClient} instance unable to publish to the Event Hubs. 
* * These options are ignored when publishing to the Event Hubs gateway for automatic routing or when using a * partition key. * * @param states A {@link Map} of {@link PartitionPublishingProperties} for each partition. The keys of the map * are the partition ids. * @return The updated {@link EventHubClientBuilder} object. */ /** * Package-private method that sets the scheduler for the created Event Hub client. * * @param scheduler Scheduler to set. * * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. Every time {@code * buildAsyncConsumer()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created. * * @return A new {@link EventHubConsumerAsyncClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerAsyncClient buildAsyncConsumerClient() { if (CoreUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty " + "string. using EventHubClientBuilder.consumerGroup(String)")); } return buildAsyncClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time {@code * buildConsumer()} is invoked, a new instance of {@link EventHubConsumerClient} is created. * * @return A new {@link EventHubConsumerClient} with the configured options. 
* @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerClient buildConsumerClient() { return buildClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created. * * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerAsyncClient buildAsyncProducerClient() { if (initialPartitionPublishingStates != null && !isIdempotentPartitionPublishing) { throw logger.logExceptionAsError(new IllegalArgumentException("'initialPartitionPublishingStates' " + "shouldn't be set if 'idempotentPartitionPublishing' is not true.")); } return buildAsyncClient().createProducer(); } /** * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerClient} is created. * * @return A new {@link EventHubProducerClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerClient buildProducerClient() { return buildClient().createProducer(); } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. 
* * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubAsyncClient buildAsyncClient() { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT; } final MessageSerializer messageSerializer = new EventHubMessageSerializer(); final EventHubConnectionProcessor processor; if (isSharedConnection.get()) { synchronized (connectionLock) { if (eventHubConnectionProcessor == null) { eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer); } } processor = eventHubConnectionProcessor; final int numberOfOpenClients = openClients.incrementAndGet(); logger.info(" } else { processor = buildConnectionProcessor(messageSerializer); } final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler, isSharedConnection.get(), this::onClientClose, isIdempotentPartitionPublishing, initialPartitionPublishingStates); } /** * Creates a new {@link EventHubClient} based on options 
set on this builder. Every time {@code buildClient()} is * invoked, a new instance of {@link EventHubClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubClient buildClient() { if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT; } final EventHubAsyncClient client = buildAsyncClient(); return new EventHubClient(client, retryOptions); } void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); logger.info("Closing a dependent client. if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { logger.warning("There should not be less than 0 clients. 
actual: {}", numberOfOpenClients); } logger.info("No more open clients, closing shared connection."); if (eventHubConnectionProcessor != null) { eventHubConnectionProcessor.dispose(); eventHubConnectionProcessor = null; } else { logger.warning("Shared EventHubConnectionProcessor was already disposed."); } } } private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) { final ConnectionOptions connectionOptions = getConnectionOptions(); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> { sink.onRequest(request -> { if (request == 0) { return; } else if (request > 1) { sink.error(logger.logExceptionAsWarning(new IllegalArgumentException( "Requested more than one connection. Only emitting one. 
Request: " + request))); return; } final String connectionId = StringUtil.getRandomString("MF"); logger.info("connectionId[{}]: Emitting a single connection.", connectionId); final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId, connectionOptions, eventHubName, provider, handlerProvider, tokenManagerProvider, messageSerializer, product, clientVersion); sink.next(connection); }); }); return connectionFlux.subscribeWith(new EventHubConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), eventHubName, connectionOptions.getRetry())); } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "credentials(String, String, TokenCredential), or setting the environment variable '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string")); } connectionString(connectionString); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP Web Sockets.")); } final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential ? 
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; return new ConnectionOptions(fullyQualifiedNamespace, credentials, authorizationType, transport, retryOptions, proxyOptions, scheduler); } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } } }
class EventHubClientBuilder { static final int DEFAULT_PREFETCH_COUNT = 500; static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1; /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; /** * The minimum value allowed for the prefetch count of the consumer. */ private static final int MINIMUM_PREFETCH_COUNT = 1; /** * The maximum value allowed for the prefetch count of the consumer. */ private static final int MAXIMUM_PREFETCH_COUNT = 8000; private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions() .setTryTimeout(ClientConstants.OPERATION_TIMEOUT); private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class); private final Object connectionLock = new Object(); private final AtomicBoolean isSharedConnection = new AtomicBoolean(); private TokenCredential credentials; private Configuration configuration; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport; private String fullyQualifiedNamespace; private String eventHubName; private String consumerGroup; private EventHubConnectionProcessor eventHubConnectionProcessor; private Integer prefetchCount; private boolean isIdempotentPartitionPublishing; private Map<String, PartitionPublishingState> initialPartitionPublishingStates; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. 
*/ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer * created using the builder. */ public EventHubClientBuilder() { transport = AmqpTransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected * that the Event Hub name and the shared access key properties are contained in this connection string. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString) { ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential); } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } else { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event Hub * name. * @param eventHubName The name of the Event Hub to connect the client to. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or, * if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString, String eventHubName) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'connectionString' cannot be an empty string.")); } else if (eventHubName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub name [%s] and it does not match the given " + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. " + "Or supply a 'connectionString' without 'EntityPath' in it.", properties.getEntityPath(), eventHubName))); } return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential); } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Toggles the builder to use the same connection for producers or consumers that are built from this instance. 
By * default, a new connection is constructed and used created for each Event Hub consumer or producer created. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder shareConnection() { this.isSharedConnection.set(true); return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be * similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>. * @param eventHubName The name of the Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the * Event Hubs namespace or the requested Event Hub, depending on Azure configuration. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty * string. * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credentials} is * null. */ public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } else if (CoreUtils.isNullOrEmpty(eventHubName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. 
When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * AmqpTransportType * * @param transport The transport type to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder transportType(AmqpTransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this * group. The name of the consumer group that is created by default is {@link * "$Default"}. * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder consumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; return this; } /** * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive * and queue locally without regard to whether a receive operation is currently active. * * @param prefetchCount The amount of events to queue locally. * * @return The updated {@link EventHubClientBuilder} object. 
* @throws IllegalArgumentException if {@code prefetchCount} is less than {@link * greater than {@link */ public EventHubClientBuilder prefetchCount(int prefetchCount) { if (prefetchCount < MINIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT))); } if (prefetchCount > MAXIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT))); } this.prefetchCount = prefetchCount; return this; } /** * Enables idempotent publishing when an {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * is built. * * If enabled, the producer will only be able to publish directly to partitions; it will not be able to publish to * the Event Hubs gateway for automatic partition routing nor using a partition key. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder enableIdempotentPartitionPublishing() { this.isIdempotentPartitionPublishing = true; return this; } /** * Sets the idempotent publishing options to {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * when you build them. * * The set of options that can be specified to influence publishing behavior specific to the configured Event Hub * partition. * These options are not necessary in the majority of scenarios and are intended for use with specialized scenarios, * such as when recovering the state used for idempotent publishing. * * It is highly recommended that these options only be specified if there is a proven need to do so; Incorrectly * configuring these values may result in the built {@link EventHubProducerAsyncClient} or * {@link EventHubProducerClient} instance unable to publish to the Event Hubs. 
* * These options are ignored when publishing to the Event Hubs gateway for automatic routing or when using a * partition key. * * @param states A {@link Map} of {@link PartitionPublishingProperties} for each partition. The keys of the map * are the partition ids. * @return The updated {@link EventHubClientBuilder} object. */ /** * Package-private method that sets the scheduler for the created Event Hub client. * * @param scheduler Scheduler to set. * * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. Every time {@code * buildAsyncConsumer()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created. * * @return A new {@link EventHubConsumerAsyncClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerAsyncClient buildAsyncConsumerClient() { if (CoreUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty " + "string. using EventHubClientBuilder.consumerGroup(String)")); } return buildAsyncClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time {@code * buildConsumer()} is invoked, a new instance of {@link EventHubConsumerClient} is created. * * @return A new {@link EventHubConsumerClient} with the configured options. 
* @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerClient buildConsumerClient() { return buildClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created. * * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerAsyncClient buildAsyncProducerClient() { if (initialPartitionPublishingStates != null && !isIdempotentPartitionPublishing) { throw logger.logExceptionAsError(new IllegalArgumentException("'initialPartitionPublishingStates' " + "shouldn't be set if 'idempotentPartitionPublishing' is not true.")); } return buildAsyncClient().createProducer(); } /** * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerClient} is created. * * @return A new {@link EventHubProducerClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerClient buildProducerClient() { return buildClient().createProducer(); } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. 
* * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubAsyncClient buildAsyncClient() { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT; } final MessageSerializer messageSerializer = new EventHubMessageSerializer(); final EventHubConnectionProcessor processor; if (isSharedConnection.get()) { synchronized (connectionLock) { if (eventHubConnectionProcessor == null) { eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer); } } processor = eventHubConnectionProcessor; final int numberOfOpenClients = openClients.incrementAndGet(); logger.info(" } else { processor = buildConnectionProcessor(messageSerializer); } final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler, isSharedConnection.get(), this::onClientClose, isIdempotentPartitionPublishing, initialPartitionPublishingStates); } /** * Creates a new {@link EventHubClient} based on options 
set on this builder. Every time {@code buildClient()} is * invoked, a new instance of {@link EventHubClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubClient buildClient() { if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT; } final EventHubAsyncClient client = buildAsyncClient(); return new EventHubClient(client, retryOptions); } void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); logger.info("Closing a dependent client. if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { logger.warning("There should not be less than 0 clients. 
actual: {}", numberOfOpenClients); } logger.info("No more open clients, closing shared connection."); if (eventHubConnectionProcessor != null) { eventHubConnectionProcessor.dispose(); eventHubConnectionProcessor = null; } else { logger.warning("Shared EventHubConnectionProcessor was already disposed."); } } } private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) { final ConnectionOptions connectionOptions = getConnectionOptions(); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> { sink.onRequest(request -> { if (request == 0) { return; } else if (request > 1) { sink.error(logger.logExceptionAsWarning(new IllegalArgumentException( "Requested more than one connection. Only emitting one. 
Request: " + request))); return; } final String connectionId = StringUtil.getRandomString("MF"); logger.info("connectionId[{}]: Emitting a single connection.", connectionId); final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId, connectionOptions, eventHubName, provider, handlerProvider, tokenManagerProvider, messageSerializer, product, clientVersion); sink.next(connection); }); }); return connectionFlux.subscribeWith(new EventHubConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), eventHubName, connectionOptions.getRetry())); } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "credentials(String, String, TokenCredential), or setting the environment variable '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string")); } connectionString(connectionString); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP Web Sockets.")); } final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential ? 
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; return new ConnectionOptions(fullyQualifiedNamespace, credentials, authorizationType, transport, retryOptions, proxyOptions, scheduler); } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } } }
Do we need the `inSysProperties` suffix in these method names? It's an implementation detail. I just want to set the producer group, sequence number, etc. on an `EventData`; it doesn't matter to me where the values are stored.
/**
 * Sends the given batch of events to the Event Hub this producer is attached to.
 *
 * <p>In the normal (non-idempotent) path, AMQP messages are built eagerly and sent with retry.
 * When idempotent partition publishing is enabled, message creation is deferred until
 * subscription time so that producer-group id, owner level, and a per-partition sequence
 * number can be stamped on each event under a per-partition semaphore; the sequence state is
 * only committed after the send succeeds.</p>
 *
 * @param batch The batch of events to send. Must not be {@code null}; an empty batch is a
 *     no-op that logs a warning and completes immediately.
 * @return A {@link Mono} that completes when all events in the batch have been sent, or
 *     errors with a {@link NullPointerException} (via {@code monoError}) when {@code batch}
 *     is {@code null}.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // Nothing to send; treat as a successful no-op rather than an error.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // parentContext is only allocated (and only dereferenced) when tracing is enabled.
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span for the whole batch.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // Non-idempotent path: messages can be built up front because no per-event
            // sequence metadata needs to be stamped before serialization.
            messages.add(createMessageFromEvent(event, partitionKey));
        }
    }

    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }

    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId());
        return Mono.fromRunnable(() -> {
            // Serialize sequence-number assignment per partition. Released in doFinally below.
            publishingState.getSemaphore().acquireUninterruptibly();
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                // Messages are created here, after stamping, so the serialized payload
                // carries the idempotent-publishing system properties.
                messages.add(createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy))
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            })
            .thenEmpty(Mono.fromRunnable(() -> {
                // Commit only after a successful send: record the starting sequence number,
                // promote the staged system properties, then advance the partition state.
                batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                for (EventData eventData : batch.getEvents()) {
                    eventData.commitProducerDataFromSysProperties();
                }
                // FIX: was publishingState.increaseSequenceNumber(...) — renamed to
                // incrementSequenceNumber for consistency with the sibling overload and
                // PartitionPublishingUtils.incrementSequenceNumber used above.
                publishingState.incrementSequenceNumber(batch.getCount());
            }))
            .doFinally(signalType -> {
                publishingState.getSemaphore().release();
            });
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
/**
 * Sends the given batch of events to the Event Hub this producer is attached to.
 *
 * <p>In the normal (non-idempotent) path, AMQP messages are built eagerly and sent with retry.
 * When idempotent partition publishing is enabled, message creation is deferred until
 * subscription time so that producer-group id, owner level, and a per-partition sequence
 * number can be stamped on each event under a per-partition semaphore; the sequence state is
 * only committed after the send succeeds.</p>
 *
 * @param batch The batch of events to send. Must not be {@code null}; an empty batch is a
 *     no-op that logs a warning and completes immediately.
 * @return A {@link Mono} that completes when all events in the batch have been sent, or
 *     errors with a {@link NullPointerException} (via {@code monoError}) when {@code batch}
 *     is {@code null}.
 */
public Mono<Void> send(EventDataBatch batch) {
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        // Nothing to send; treat as a successful no-op rather than an error.
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }
    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(),
            batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(),
            batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount());
    }
    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // parentContext is only allocated (and only dereferenced) when tracing is enabled.
    final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();
    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span for the whole batch.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // Non-idempotent path: messages can be built up front because no per-event
            // sequence metadata needs to be stamped before serialization.
            messages.add(createMessageFromEvent(event, partitionKey));
        }
    }
    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null ? Context.NONE : sharedContext
            .addData(ENTITY_PATH_KEY, eventHubName)
            .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
            .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }
    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId());
        return Mono.fromRunnable(() -> {
            // Serialize sequence-number assignment per partition; released in doFinally below.
            publishingState.getSemaphore().acquireUninterruptibly();
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                // Messages are created here, after stamping, so the serialized payload
                // carries the idempotent-publishing system properties.
                messages.add(createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(
                    link -> messages.size() == 1
                        ? link.send(messages.get(0))
                        : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy
            )).publishOn(scheduler).doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            }).thenEmpty(Mono.fromRunnable(() -> {
                // Commit only after a successful send: record the starting sequence number,
                // promote the staged system properties, then advance the partition state.
                batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                for (EventData eventData : batch.getEvents()) {
                    eventData.commitProducerDataFromSysProperties();
                }
                publishingState.incrementSequenceNumber(batch.getCount());
            })).doFinally(
                signalType -> {
                    publishingState.getSemaphore().release();
                }
            );
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I give it this specific name because `setProducerGroupIdInSysProperties` is internal and it doesn't set the value of property `producerGroupId`. After `setProducerGroupIdInSysProperties` is called, `getProducerGroupId` still returns null. After the event data is successfully sent out, the `commitProducerDataFromSysProperties` is called to set the value of `producerGroupId` and two other properties. Then `getProducerGroupId` returns the `producerGroupId` with a value. So I think `setProducerGroupIdInSysProperties` would be more specific and meaningful than `setProducerGroupId` when somebody else reads the code.
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.increaseSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.verbose("Sending batch with size[{}] to partitionId[{}].", batch.getCount(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.verbose("Sending batch with size[{}] with partitionKey[{}].", batch.getCount(), batch.getPartitionKey()); } else { logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.", batch.getCount()); } final String partitionKey = batch.getPartitionKey(); final boolean isTracingEnabled = tracerProvider.isEnabled(); final AtomicReference<Context> parentContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; Context sharedContext = null; final List<Message> messages = new ArrayList<>(); for (int i = 0; i < batch.getEvents().size(); i++) { final EventData event = batch.getEvents().get(i); if (isTracingEnabled) { parentContext.set(event.getContext()); if (i == 0) { sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get()); } tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext())); } if (!isIdempotentPartitionPublishing) { messages.add(createMessageFromEvent(event, partitionKey)); } } if (isTracingEnabled) { final Context finalSharedContext = sharedContext == null ? 
Context.NONE : sharedContext .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND)); } if (isIdempotentPartitionPublishing) { PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId()); return Mono.fromRunnable(() -> { publishingState.getSemaphore().acquireUninterruptibly(); int seqNumber = publishingState.getSequenceNumber(); for (EventData eventData : batch.getEvents()) { eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId()); eventData.setPublishedSequenceNumberInSysProperties(seqNumber); eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel()); seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber); messages.add(createMessageFromEvent(eventData, partitionKey)); } }).then( withRetry(getSendLink(batch.getPartitionId()) .flatMap( link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy )).publishOn(scheduler).doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }).thenEmpty(Mono.fromRunnable(() -> { batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber()); for (EventData eventData : batch.getEvents()) { eventData.commitProducerDataFromSysProperties(); } publishingState.incrementSequenceNumber(batch.getCount()); })).doFinally( signalType -> { publishingState.getSemaphore().release(); } ); } else { return withRetry(getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)), retryOptions.getTryTimeout(), retryPolicy) .publishOn(scheduler) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(parentContext.get(), signal); } }); } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Oops! What an oversight. Updated.
public EventHubClientBuilder initialPartitionPublishingStates(Map<String, PartitionPublishingProperties> states) { if (states != null) { this.initialPartitionPublishingStates = new HashMap<>(); states.forEach((partitionId, state) -> { this.initialPartitionPublishingStates.put(partitionId, new PartitionPublishingState(state)); this.initialPartitionPublishingStates = Collections.unmodifiableMap(this.initialPartitionPublishingStates); }); } else { this.initialPartitionPublishingStates = null; } return this; }
this.initialPartitionPublishingStates =
public EventHubClientBuilder initialPartitionPublishingStates(Map<String, PartitionPublishingProperties> states) { if (states != null) { this.initialPartitionPublishingStates = new HashMap<>(); states.forEach((partitionId, state) -> { this.initialPartitionPublishingStates.put(partitionId, new PartitionPublishingState(state)); }); this.initialPartitionPublishingStates = Collections.unmodifiableMap(this.initialPartitionPublishingStates); } else { this.initialPartitionPublishingStates = null; } return this; }
class EventHubClientBuilder { static final int DEFAULT_PREFETCH_COUNT = 500; static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1; /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; /** * The minimum value allowed for the prefetch count of the consumer. */ private static final int MINIMUM_PREFETCH_COUNT = 1; /** * The maximum value allowed for the prefetch count of the consumer. */ private static final int MAXIMUM_PREFETCH_COUNT = 8000; private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions() .setTryTimeout(ClientConstants.OPERATION_TIMEOUT); private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class); private final Object connectionLock = new Object(); private final AtomicBoolean isSharedConnection = new AtomicBoolean(); private TokenCredential credentials; private Configuration configuration; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport; private String fullyQualifiedNamespace; private String eventHubName; private String consumerGroup; private EventHubConnectionProcessor eventHubConnectionProcessor; private Integer prefetchCount; private boolean isIdempotentPartitionPublishing; private Map<String, PartitionPublishingState> initialPartitionPublishingStates; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. 
*/ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer * created using the builder. */ public EventHubClientBuilder() { transport = AmqpTransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected * that the Event Hub name and the shared access key properties are contained in this connection string. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString) { ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential); } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } else { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event Hub * name. * @param eventHubName The name of the Event Hub to connect the client to. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or, * if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString, String eventHubName) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'connectionString' cannot be an empty string.")); } else if (eventHubName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub name [%s] and it does not match the given " + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. " + "Or supply a 'connectionString' without 'EntityPath' in it.", properties.getEntityPath(), eventHubName))); } return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential); } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Toggles the builder to use the same connection for producers or consumers that are built from this instance. 
By * default, a new connection is constructed and used created for each Event Hub consumer or producer created. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder shareConnection() { this.isSharedConnection.set(true); return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be * similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>. * @param eventHubName The name of the Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the * Event Hubs namespace or the requested Event Hub, depending on Azure configuration. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty * string. * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credentials} is * null. */ public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } else if (CoreUtils.isNullOrEmpty(eventHubName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. 
When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * AmqpTransportType * * @param transport The transport type to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder transportType(AmqpTransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this * group. The name of the consumer group that is created by default is {@link * "$Default"}. * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder consumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; return this; } /** * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive * and queue locally without regard to whether a receive operation is currently active. * * @param prefetchCount The amount of events to queue locally. * * @return The updated {@link EventHubClientBuilder} object. 
* @throws IllegalArgumentException if {@code prefetchCount} is less than {@link * greater than {@link */ public EventHubClientBuilder prefetchCount(int prefetchCount) { if (prefetchCount < MINIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT))); } if (prefetchCount > MAXIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT))); } this.prefetchCount = prefetchCount; return this; } /** * Enables idempotent publishing when an {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * is built. * * If enabled, the producer will only be able to publish directly to partitions; it will not be able to publish to * the Event Hubs gateway for automatic partition routing nor using a partition key. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder enableIdempotentPartitionPublishing() { this.isIdempotentPartitionPublishing = true; return this; } /** * Sets the idempotent publishing options to {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * when you build them. * * The set of options that can be specified to influence publishing behavior specific to the configured Event Hub * partition. * These options are not necessary in the majority of scenarios and are intended for use with specialized scenarios, * such as when recovering the state used for idempotent publishing. * * It is highly recommended that these options only be specified if there is a proven need to do so; Incorrectly * configuring these values may result in the built {@link EventHubProducerAsyncClient} or * {@link EventHubProducerClient} instance unable to publish to the Event Hubs. 
* * These options are ignored when publishing to the Event Hubs gateway for automatic routing or when using a * partition key. * * @param states A {@link Map} of {@link PartitionPublishingProperties} for each partition. The keys of the map * are the partition ids. * @return The updated {@link EventHubClientBuilder} object. */ /** * Package-private method that sets the scheduler for the created Event Hub client. * * @param scheduler Scheduler to set. * * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. Every time {@code * buildAsyncConsumer()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created. * * @return A new {@link EventHubConsumerAsyncClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerAsyncClient buildAsyncConsumerClient() { if (CoreUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty " + "string. using EventHubClientBuilder.consumerGroup(String)")); } return buildAsyncClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time {@code * buildConsumer()} is invoked, a new instance of {@link EventHubConsumerClient} is created. * * @return A new {@link EventHubConsumerClient} with the configured options. 
* @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerClient buildConsumerClient() { return buildClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created. * * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerAsyncClient buildAsyncProducerClient() { if (initialPartitionPublishingStates != null && !isIdempotentPartitionPublishing) { throw logger.logExceptionAsError(new IllegalArgumentException("'initialPartitionPublishingStates' " + "shouldn't be set if 'idempotentPartitionPublishing' is not true.")); } return buildAsyncClient().createProducer(); } /** * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerClient} is created. * * @return A new {@link EventHubProducerClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerClient buildProducerClient() { return buildClient().createProducer(); } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. 
* * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubAsyncClient buildAsyncClient() { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT; } final MessageSerializer messageSerializer = new EventHubMessageSerializer(); final EventHubConnectionProcessor processor; if (isSharedConnection.get()) { synchronized (connectionLock) { if (eventHubConnectionProcessor == null) { eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer); } } processor = eventHubConnectionProcessor; final int numberOfOpenClients = openClients.incrementAndGet(); logger.info(" } else { processor = buildConnectionProcessor(messageSerializer); } final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler, isSharedConnection.get(), this::onClientClose, isIdempotentPartitionPublishing, initialPartitionPublishingStates); } /** * Creates a new {@link EventHubClient} based on options 
set on this builder. Every time {@code buildClient()} is * invoked, a new instance of {@link EventHubClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubClient buildClient() { if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT; } final EventHubAsyncClient client = buildAsyncClient(); return new EventHubClient(client, retryOptions); } void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); logger.info("Closing a dependent client. if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { logger.warning("There should not be less than 0 clients. 
actual: {}", numberOfOpenClients); } logger.info("No more open clients, closing shared connection."); if (eventHubConnectionProcessor != null) { eventHubConnectionProcessor.dispose(); eventHubConnectionProcessor = null; } else { logger.warning("Shared EventHubConnectionProcessor was already disposed."); } } } private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) { final ConnectionOptions connectionOptions = getConnectionOptions(); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> { sink.onRequest(request -> { if (request == 0) { return; } else if (request > 1) { sink.error(logger.logExceptionAsWarning(new IllegalArgumentException( "Requested more than one connection. Only emitting one. 
Request: " + request))); return; } final String connectionId = StringUtil.getRandomString("MF"); logger.info("connectionId[{}]: Emitting a single connection.", connectionId); final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId, connectionOptions, eventHubName, provider, handlerProvider, tokenManagerProvider, messageSerializer, product, clientVersion); sink.next(connection); }); }); return connectionFlux.subscribeWith(new EventHubConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), eventHubName, connectionOptions.getRetry())); } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "credentials(String, String, TokenCredential), or setting the environment variable '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string")); } connectionString(connectionString); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP Web Sockets.")); } final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential ? 
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; return new ConnectionOptions(fullyQualifiedNamespace, credentials, authorizationType, transport, retryOptions, proxyOptions, scheduler); } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } } }
class EventHubClientBuilder { static final int DEFAULT_PREFETCH_COUNT = 500; static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1; /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; /** * The minimum value allowed for the prefetch count of the consumer. */ private static final int MINIMUM_PREFETCH_COUNT = 1; /** * The maximum value allowed for the prefetch count of the consumer. */ private static final int MAXIMUM_PREFETCH_COUNT = 8000; private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions() .setTryTimeout(ClientConstants.OPERATION_TIMEOUT); private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class); private final Object connectionLock = new Object(); private final AtomicBoolean isSharedConnection = new AtomicBoolean(); private TokenCredential credentials; private Configuration configuration; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport; private String fullyQualifiedNamespace; private String eventHubName; private String consumerGroup; private EventHubConnectionProcessor eventHubConnectionProcessor; private Integer prefetchCount; private boolean isIdempotentPartitionPublishing; private Map<String, PartitionPublishingState> initialPartitionPublishingStates; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. 
*/ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer * created using the builder. */ public EventHubClientBuilder() { transport = AmqpTransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected * that the Event Hub name and the shared access key properties are contained in this connection string. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString) { ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential); } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } else { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event Hub * name. * @param eventHubName The name of the Event Hub to connect the client to. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or, * if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString, String eventHubName) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'connectionString' cannot be an empty string.")); } else if (eventHubName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub name [%s] and it does not match the given " + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. " + "Or supply a 'connectionString' without 'EntityPath' in it.", properties.getEntityPath(), eventHubName))); } return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential); } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Toggles the builder to use the same connection for producers or consumers that are built from this instance. 
By * default, a new connection is constructed and used created for each Event Hub consumer or producer created. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder shareConnection() { this.isSharedConnection.set(true); return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be * similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>. * @param eventHubName The name of the Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the * Event Hubs namespace or the requested Event Hub, depending on Azure configuration. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty * string. * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credentials} is * null. */ public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName, TokenCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } else if (CoreUtils.isNullOrEmpty(eventHubName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. 
When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * AmqpTransportType * * @param transport The transport type to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder transportType(AmqpTransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this * group. The name of the consumer group that is created by default is {@link * "$Default"}. * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder consumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; return this; } /** * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive * and queue locally without regard to whether a receive operation is currently active. * * @param prefetchCount The amount of events to queue locally. * * @return The updated {@link EventHubClientBuilder} object. 
* @throws IllegalArgumentException if {@code prefetchCount} is less than {@link * greater than {@link */ public EventHubClientBuilder prefetchCount(int prefetchCount) { if (prefetchCount < MINIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT))); } if (prefetchCount > MAXIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT))); } this.prefetchCount = prefetchCount; return this; } /** * Enables idempotent publishing when an {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * is built. * * If enabled, the producer will only be able to publish directly to partitions; it will not be able to publish to * the Event Hubs gateway for automatic partition routing nor using a partition key. * * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder enableIdempotentPartitionPublishing() { this.isIdempotentPartitionPublishing = true; return this; } /** * Sets the idempotent publishing options to {@link EventHubProducerAsyncClient} or {@link EventHubProducerClient} * when you build them. * * The set of options that can be specified to influence publishing behavior specific to the configured Event Hub * partition. * These options are not necessary in the majority of scenarios and are intended for use with specialized scenarios, * such as when recovering the state used for idempotent publishing. * * It is highly recommended that these options only be specified if there is a proven need to do so; Incorrectly * configuring these values may result in the built {@link EventHubProducerAsyncClient} or * {@link EventHubProducerClient} instance unable to publish to the Event Hubs. 
* * These options are ignored when publishing to the Event Hubs gateway for automatic routing or when using a * partition key. * * @param states A {@link Map} of {@link PartitionPublishingProperties} for each partition. The keys of the map * are the partition ids. * @return The updated {@link EventHubClientBuilder} object. */ /** * Package-private method that sets the scheduler for the created Event Hub client. * * @param scheduler Scheduler to set. * * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. Every time {@code * buildAsyncConsumer()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created. * * @return A new {@link EventHubConsumerAsyncClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerAsyncClient buildAsyncConsumerClient() { if (CoreUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty " + "string. using EventHubClientBuilder.consumerGroup(String)")); } return buildAsyncClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time {@code * buildConsumer()} is invoked, a new instance of {@link EventHubConsumerClient} is created. * * @return A new {@link EventHubConsumerClient} with the configured options. 
* @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerClient buildConsumerClient() { return buildClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created. * * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerAsyncClient buildAsyncProducerClient() { if (initialPartitionPublishingStates != null && !isIdempotentPartitionPublishing) { throw logger.logExceptionAsError(new IllegalArgumentException("'initialPartitionPublishingStates' " + "shouldn't be set if 'idempotentPartitionPublishing' is not true.")); } return buildAsyncClient().createProducer(); } /** * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerClient} is created. * * @return A new {@link EventHubProducerClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerClient buildProducerClient() { return buildClient().createProducer(); } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. 
* * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubAsyncClient buildAsyncClient() { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.elastic(); } if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT; } final MessageSerializer messageSerializer = new EventHubMessageSerializer(); final EventHubConnectionProcessor processor; if (isSharedConnection.get()) { synchronized (connectionLock) { if (eventHubConnectionProcessor == null) { eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer); } } processor = eventHubConnectionProcessor; final int numberOfOpenClients = openClients.incrementAndGet(); logger.info(" } else { processor = buildConnectionProcessor(messageSerializer); } final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler, isSharedConnection.get(), this::onClientClose, isIdempotentPartitionPublishing, initialPartitionPublishingStates); } /** * Creates a new {@link EventHubClient} based on options 
set on this builder. Every time {@code buildClient()} is * invoked, a new instance of {@link EventHubClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubClient buildClient() { if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT; } final EventHubAsyncClient client = buildAsyncClient(); return new EventHubClient(client, retryOptions); } void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); logger.info("Closing a dependent client. if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { logger.warning("There should not be less than 0 clients. 
actual: {}", numberOfOpenClients); } logger.info("No more open clients, closing shared connection."); if (eventHubConnectionProcessor != null) { eventHubConnectionProcessor.dispose(); eventHubConnectionProcessor = null; } else { logger.warning("Shared EventHubConnectionProcessor was already disposed."); } } } private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) { final ConnectionOptions connectionOptions = getConnectionOptions(); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> { sink.onRequest(request -> { if (request == 0) { return; } else if (request > 1) { sink.error(logger.logExceptionAsWarning(new IllegalArgumentException( "Requested more than one connection. Only emitting one. 
Request: " + request))); return; } final String connectionId = StringUtil.getRandomString("MF"); logger.info("connectionId[{}]: Emitting a single connection.", connectionId); final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId, connectionOptions, eventHubName, provider, handlerProvider, tokenManagerProvider, messageSerializer, product, clientVersion); sink.next(connection); }); }); return connectionFlux.subscribeWith(new EventHubConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), eventHubName, connectionOptions.getRetry())); } private ConnectionOptions getConnectionOptions() { configuration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. " + "They can be set using: connectionString(String), connectionString(String, String), " + "credentials(String, String, TokenCredential), or setting the environment variable '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string")); } connectionString(connectionString); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(configuration); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP Web Sockets.")); } final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential ? 
CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; return new ConnectionOptions(fullyQualifiedNamespace, credentials, authorizationType, transport, retryOptions, proxyOptions, scheduler); } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); return new ProxyOptions(authentication, new Proxy(coreProxyOptions.getType().toProxyType(), coreProxyOptions.getAddress()), coreProxyOptions.getUsername(), coreProxyOptions.getPassword()); } } }
The other option was to consider a ReentrantLock, but since the thread that acquires the lock may not be the same as the one that releases it, a semaphore is the only option here.
/**
 * Sends the given {@link EventDataBatch} to the Event Hub, completing the returned {@link Mono} when the
 * service acknowledges the send.
 *
 * <p>When idempotent partition publishing is enabled ({@code isIdempotentPartitionPublishing}), the per-partition
 * publishing state is used to stamp a producer group id, published sequence number, and owner level into each
 * event's system properties before the AMQP messages are created; on success the state's sequence number is
 * advanced by the batch count and the stamped values are committed back onto the events.</p>
 *
 * @param batch the batch of events to send; must not be {@code null}.
 * @return a {@link Mono} that completes when the batch has been sent, errors with
 *     {@link NullPointerException} if {@code batch} is {@code null}, or completes empty immediately
 *     (with a warning log) if the batch contains no events.
 */
public Mono<Void> send(EventDataBatch batch) {
    // Guard clauses: null batch is an error; an empty batch is a warned no-op.
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    // Log the routing mode: explicit partition id, partition key, or service-side round-robin.
    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].",
            batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].",
            batch.getCount(), batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
            batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Holder for the tracing parent context; only allocated when tracing is on.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span builder for the whole batch.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // Non-idempotent path: messages can be created eagerly because no
            // sequence-number stamping is needed.
            messages.add(this.createMessageFromEvent(event, partitionKey));
        }
    }

    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }

    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState =
            this.getClientPartitionPublishingState(batch.getPartitionId());

        return Mono.fromRunnable(() -> {
            // Serialize publishing per partition. A Semaphore (not a lock) is used because the
            // releasing thread in doFinally may differ from the acquiring thread — TODO confirm
            // against the reactive scheduler behavior.
            // NOTE(review): acquireUninterruptibly blocks the subscribing thread until the
            // semaphore is available.
            publishingState.getSemaphore().acquireUninterruptibly();
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                // Stamp idempotency metadata into each event's system properties; messages are
                // created here (not in the loop above) so they carry the stamped values.
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                messages.add(this.createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            // Single-message batches use the single-message overload of the link.
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(
                    link -> messages.size() == 1
                        ? link.send(messages.get(0))
                        : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy
            )).publishOn(scheduler).doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            }).thenEmpty(Mono.fromRunnable(() -> {
                // On success: record the batch's starting sequence number, commit the stamped
                // system properties onto the events, and advance the partition state.
                batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                for (EventData eventData : batch.getEvents()) {
                    eventData.commitProducerDataFromSysProperties();
                }
                publishingState.increaseSequenceNumber(batch.getCount());
            })).doFinally(
                // Always release, success or failure, so the partition is not deadlocked.
                signalType -> {
                    publishingState.getSemaphore().release();
                }
            );
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
publishingState.getSemaphore().acquireUninterruptibly();
/**
 * Sends the given {@link EventDataBatch} to the Event Hub, completing the returned {@link Mono} when the
 * service acknowledges the send.
 *
 * <p>When idempotent partition publishing is enabled ({@code isIdempotentPartitionPublishing}), the per-partition
 * publishing state is used to stamp a producer group id, published sequence number, and owner level into each
 * event's system properties before the AMQP messages are created; on success the state's sequence number is
 * incremented by the batch count and the stamped values are committed back onto the events.</p>
 *
 * @param batch the batch of events to send; must not be {@code null}.
 * @return a {@link Mono} that completes when the batch has been sent, errors with
 *     {@link NullPointerException} if {@code batch} is {@code null}, or completes empty immediately
 *     (with a warning log) if the batch contains no events.
 */
public Mono<Void> send(EventDataBatch batch) {
    // Guard clauses: null batch is an error; an empty batch is a warned no-op.
    if (batch == null) {
        return monoError(logger, new NullPointerException("'batch' cannot be null."));
    } else if (batch.getEvents().isEmpty()) {
        logger.warning(Messages.CANNOT_SEND_EVENT_BATCH_EMPTY);
        return Mono.empty();
    }

    // Log the routing mode: explicit partition id, partition key, or service-side round-robin.
    if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) {
        logger.verbose("Sending batch with size[{}] to partitionId[{}].",
            batch.getCount(), batch.getPartitionId());
    } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) {
        logger.verbose("Sending batch with size[{}] with partitionKey[{}].",
            batch.getCount(), batch.getPartitionKey());
    } else {
        logger.verbose("Sending batch with size[{}] to be distributed round-robin in service.",
            batch.getCount());
    }

    final String partitionKey = batch.getPartitionKey();
    final boolean isTracingEnabled = tracerProvider.isEnabled();
    // Holder for the tracing parent context; only allocated when tracing is on.
    final AtomicReference<Context> parentContext = isTracingEnabled
        ? new AtomicReference<>(Context.NONE) : null;
    Context sharedContext = null;
    final List<Message> messages = new ArrayList<>();

    for (int i = 0; i < batch.getEvents().size(); i++) {
        final EventData event = batch.getEvents().get(i);
        if (isTracingEnabled) {
            parentContext.set(event.getContext());
            if (i == 0) {
                // The first event's context seeds the shared span builder for the whole batch.
                sharedContext = tracerProvider.getSharedSpanBuilder(parentContext.get());
            }
            tracerProvider.addSpanLinks(sharedContext.addData(SPAN_CONTEXT_KEY, event.getContext()));
        }
        if (!isIdempotentPartitionPublishing) {
            // Non-idempotent path: messages can be created eagerly because no
            // sequence-number stamping is needed.
            messages.add(createMessageFromEvent(event, partitionKey));
        }
    }

    if (isTracingEnabled) {
        final Context finalSharedContext = sharedContext == null
            ? Context.NONE
            : sharedContext
                .addData(ENTITY_PATH_KEY, eventHubName)
                .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
                .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        parentContext.set(tracerProvider.startSpan(finalSharedContext, ProcessKind.SEND));
    }

    if (isIdempotentPartitionPublishing) {
        PartitionPublishingState publishingState = getClientPartitionPublishingState(batch.getPartitionId());

        return Mono.fromRunnable(() -> {
            // Serialize publishing per partition. A Semaphore (not a lock) is used because the
            // releasing thread in doFinally may differ from the acquiring thread — TODO confirm
            // against the reactive scheduler behavior.
            // NOTE(review): acquireUninterruptibly blocks the subscribing thread until the
            // semaphore is available.
            publishingState.getSemaphore().acquireUninterruptibly();
            int seqNumber = publishingState.getSequenceNumber();
            for (EventData eventData : batch.getEvents()) {
                // Stamp idempotency metadata into each event's system properties; messages are
                // created here (not in the loop above) so they carry the stamped values.
                eventData.setProducerGroupIdInSysProperties(publishingState.getProducerGroupId());
                eventData.setPublishedSequenceNumberInSysProperties(seqNumber);
                eventData.setProducerOwnerLevelInSysProperties(publishingState.getOwnerLevel());
                seqNumber = PartitionPublishingUtils.incrementSequenceNumber(seqNumber);
                messages.add(createMessageFromEvent(eventData, partitionKey));
            }
        }).then(
            // Single-message batches use the single-message overload of the link.
            withRetry(getSendLink(batch.getPartitionId())
                .flatMap(
                    link -> messages.size() == 1
                        ? link.send(messages.get(0))
                        : link.send(messages)),
                retryOptions.getTryTimeout(), retryPolicy
            )).publishOn(scheduler).doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            }).thenEmpty(Mono.fromRunnable(() -> {
                // On success: record the batch's starting sequence number, commit the stamped
                // system properties onto the events, and advance the partition state.
                batch.setStartingPublishedSequenceNumber(publishingState.getSequenceNumber());
                for (EventData eventData : batch.getEvents()) {
                    eventData.commitProducerDataFromSysProperties();
                }
                publishingState.incrementSequenceNumber(batch.getCount());
            })).doFinally(
                // Always release, success or failure, so the partition is not deadlocked.
                signalType -> {
                    publishingState.getSemaphore().release();
                }
            );
    } else {
        return withRetry(getSendLink(batch.getPartitionId())
            .flatMap(link -> messages.size() == 1
                ? link.send(messages.get(0))
                : link.send(messages)),
            retryOptions.getTryTimeout(), retryPolicy)
            .publishOn(scheduler)
            .doOnEach(signal -> {
                if (isTracingEnabled) {
                    tracerProvider.endSpan(parentContext.get(), signal);
                }
            });
    }
}
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(this.getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(this.getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (this.isIdempotentPartitionPublishing) { if (CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && this.isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), this.isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (this.isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { this.setPartitionPublishingState( partitionId, (Long) properties.get(SymbolConstants.PRODUCER_ID), (Short) properties.get(SymbolConstants.PRODUCER_EPOCH), (Integer) properties.get(SymbolConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!this.isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } else { synchronized (this) { if (this.partitionPublishingStates.containsKey(partitionId)) { return this.partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); this.partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> this.updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnectionProcessor connectionProcessor; private final AmqpRetryOptions retryOptions; private final AmqpRetryPolicy retryPolicy; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Scheduler scheduler; private final boolean isSharedConnection; private final Runnable onClientClose; private final boolean isIdempotentPartitionPublishing; private final Map<String, PartitionPublishingState> partitionPublishingStates; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. 
*/ EventHubProducerAsyncClient( String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose, boolean isIdempotentPartitionPublishing, Map<String, PartitionPublishingState> initialPartitionPublishingStates ) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.retryPolicy = getRetryPolicy(retryOptions); this.scheduler = scheduler; this.isSharedConnection = isSharedConnection; this.isIdempotentPartitionPublishing = isIdempotentPartitionPublishing; if (isIdempotentPartitionPublishing) { if (initialPartitionPublishingStates == null) { this.partitionPublishingStates = new HashMap<>(); } else { this.partitionPublishingStates = initialPartitionPublishingStates; } } else { this.partitionPublishingStates = null; } } EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnectionProcessor connectionProcessor, AmqpRetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, Scheduler scheduler, boolean isSharedConnection, Runnable onClientClose ) { this(fullyQualifiedNamespace, eventHubName, connectionProcessor, retryOptions, 
tracerProvider, messageSerializer, scheduler, isSharedConnection, onClientClose, false, null); } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Event Hubs namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getEventHubProperties() { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getEventHubProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. * @throws NullPointerException if {@code partitionId} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connectionProcessor.flatMap(connection -> connection.getManagementNode()) .flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingProperties}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. */ Mono<PartitionPublishingProperties> getPartitionPublishingProperties(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState.toPartitionPublishingProperties())); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy).map( PartitionPublishingState::toPartitionPublishingProperties); } } /** * Get the idempotent producer's publishing state of a partition. * @param partitionId The partition id of the publishing state * @return A mono that has the {@link PartitionPublishingState}. * {@code null} if the partition doesn't have any state yet. * @throws UnsupportedOperationException if this producer isn't an idempotent producer. 
*/ Mono<PartitionPublishingState> getPartitionPublishingState(String partitionId) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState.isFromLink()) { return Mono.defer(() -> Mono.just(publishingState)); } else { return withRetry(getSendLink(partitionId).flatMap(amqpSendLink -> Mono.just(getClientPartitionPublishingState(partitionId))), retryOptions.getTryTimeout(), retryPolicy); } } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} configured with the options specified. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. * @throws NullPointerException if {@code options} is null. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (isIdempotentPartitionPublishing && CoreUtils.isNullOrEmpty(options.getPartitionId())) { return monoError(logger, new IllegalArgumentException( "An idempotent producer can not create an EventDataBatch without partition id")); } final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); final int batchMaxSize = options.getMaximumSizeInBytes(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "CreateBatchOptions.getPartitionKey() and CreateBatchOptions.getPartitionId() are both set. " + "Only one or the other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } else if (!CoreUtils.isNullOrEmpty(partitionKey) && partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "Partition key '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH))); } return getSendLink(partitionId) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (batchMaxSize > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", batchMaxSize, maximumLinkSize))); } final int batchSize = batchMaxSize > 0 ? batchMaxSize : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, partitionId, partitionKey, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. 
* @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * {@codesnippet com.azure.messaging.eventhubs.eventhubasyncproducerclient.send * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. 
* @return A {@link Mono} that completes when all events are pushed to the service. * @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (options.getPartitionId() == null && isIdempotentPartitionPublishing) { return monoError(logger, new IllegalArgumentException("Please set the partition id in `options` " + "because this producer client is an idempotent producer")); } return sendInternal(events, options).publishOn(scheduler); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ private Message createMessageFromEvent(EventData event, String partitionKey) { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.getPartitionKey(); final String partitionId = options.getPartitionId(); if (!CoreUtils.isNullOrEmpty(partitionKey) && !CoreUtils.isNullOrEmpty(partitionId)) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "SendOptions.getPartitionKey() and SendOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. 
partitionId: '%s'", partitionKey, partitionId))); } return getSendLink(options.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions batchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, link::getErrorContext, tracerProvider, link.getEntityPath(), link.getHostname(), isIdempotentPartitionPublishing)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list)))); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error(Messages.ERROR_SENDING_BATCH, error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private Mono<AmqpSendLink> updatePublishingState(String partitionId, AmqpSendLink amqpSendLink) { if (isIdempotentPartitionPublishing) { return amqpSendLink.getRemoteProperties().map(properties -> { setPartitionPublishingState( partitionId, (Long) properties.get(ClientConstants.PRODUCER_ID), (Short) properties.get(ClientConstants.PRODUCER_EPOCH), (Integer) properties.get(ClientConstants.PRODUCER_SEQUENCE_NUMBER) ); return amqpSendLink; }); } else { return Mono.just(amqpSendLink); } } /** * Get the idempotent producer's publishing state of a partition from the client side maintained state. * It doesn't create a link to get state from the service. 
*/ private PartitionPublishingState getClientPartitionPublishingState(String partitionId) { if (!isIdempotentPartitionPublishing) { throw logger.logExceptionAsWarning( new IllegalStateException("getPartitionPublishingState() shouldn't be called if the producer" + " is not an idempotent producer.")); } if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } else { synchronized (partitionPublishingStates) { if (partitionPublishingStates.containsKey(partitionId)) { return partitionPublishingStates.get(partitionId); } PartitionPublishingState state = new PartitionPublishingState(); partitionPublishingStates.put(partitionId, state); return state; } } } private void setPartitionPublishingState( String partitionId, Long producerGroupId, Short ownerLevel, Integer sequenceNumber) { PartitionPublishingState publishingState = getClientPartitionPublishingState(partitionId); if (publishingState != null && (publishingState.getSequenceNumber() == null || publishingState.getSequenceNumber() <= sequenceNumber)) { publishingState.setOwnerLevel(ownerLevel); publishingState.setProducerGroupId(producerGroupId); publishingState.setSequenceNumber(sequenceNumber); publishingState.setFromLink(true); } } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final String linkName = getEntityPath(partitionId); return connectionProcessor .flatMap(connection -> isIdempotentPartitionPublishing ? connection.createSendLink( linkName, entityPath, retryOptions, true, getClientPartitionPublishingState(partitionId)) : connection.createSendLink( linkName, entityPath, retryOptions)) .flatMap(amqpSendLink -> updatePublishingState(partitionId, amqpSendLink)); } /** * Disposes of the {@link EventHubProducerAsyncClient}. If the client had a dedicated connection, the underlying * connection is also closed. 
*/ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (isSharedConnection) { onClientClose.run(); } else { connectionProcessor.dispose(); } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * AmqpErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; private final boolean isCreatedByIdempotentProducer; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname, boolean isCreatedByIdempotentProducer) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; this.isCreatedByIdempotentProducer = isCreatedByIdempotentProducer; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, isCreatedByIdempotentProducer); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, Messages.EVENT_DATA_DOES_NOT_FIT, maxNumberOfBatches); throw new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider, tracerProvider, entityPath, hostname, this.isCreatedByIdempotentProducer); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Looking at the [docs](https://docs.microsoft.com/en-us/java/api/com.azure.storage.blob.specialized.blobasyncclientbase.download?view=azure-java-stable#com_azure_storage_blob_specialized_BlobAsyncClientBase_download__) for this API, would it be better to use this, which is simpler, more closely matches the doc sample, and also more closely matches the sync code? ```java NullOutputStream.getInstance().write(b.array()) ```
public Mono<Void> runAsync() { return blobAsyncClient.download() .map(b -> { int readCount = 0; int remaining = b.remaining(); while (readCount < remaining) { int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE); b.get(buffer, 0, expectedReadCount); readCount += expectedReadCount; } return 1; }).then(); }
int readCount = 0;
public Mono<Void> runAsync() { return blobAsyncClient.download() .map(b -> { int readCount = 0; int remaining = b.remaining(); while (readCount < remaining) { int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE); b.get(buffer, 0, expectedReadCount); readCount += expectedReadCount; } return 1; }).then(); }
class NullOutputStream extends OutputStream { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }
class NullOutputStream extends OutputStream { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }
Hmm, that sample should be updated, as a `ByteBuffer` isn't guaranteed to have a backing `byte[]`. If the `ByteBuffer` instance is a `DirectByteBuffer` (i.e. OS-managed memory), calling `array()` will throw an `UnsupportedOperationException`.
public Mono<Void> runAsync() { return blobAsyncClient.download() .map(b -> { int readCount = 0; int remaining = b.remaining(); while (readCount < remaining) { int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE); b.get(buffer, 0, expectedReadCount); readCount += expectedReadCount; } return 1; }).then(); }
int readCount = 0;
public Mono<Void> runAsync() { return blobAsyncClient.download() .map(b -> { int readCount = 0; int remaining = b.remaining(); while (readCount < remaining) { int expectedReadCount = Math.min(remaining - readCount, BUFFER_SIZE); b.get(buffer, 0, expectedReadCount); readCount += expectedReadCount; } return 1; }).then(); }
class NullOutputStream extends OutputStream { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }
class NullOutputStream extends OutputStream { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }
Unfortunately, we'll have to iterate through the collection twice - once to convert the Iterable to a list and then again when serializing. This impacts performance. Not sure if we can avoid this without changing the API to take a `List` instead.
Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, context)); }
.flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, context));
Mono<Void> sendCustomEvents(Iterable<Object> events, Context context) { return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsAsync(this.hostname, list, context)); }
class EventGridPublisherAsyncClient { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, SerializerAdapter serializerAdapter, EventGridServiceVersion serviceVersion) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .serializerAdapter(serializerAdapter) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; } /** * Get the service version of the Rest API. * @return the Service version of the rest API */ public EventGridServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Publishes the given EventGrid events to the set topic or domain. * @param events the EventGrid events to publish. * * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<EventGridEvent> events) { return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEvents(Iterable<EventGridEvent> events, Context context) { return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, context)); } /** * Publishes the given cloud events to the set topic or domain. * @param events the cloud events to publish. * * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendCloudEvents(Iterable<CloudEvent> events) { return withContext(context -> sendCloudEvents(events, context)); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, context)); } /** * Publishes the given custom events to the set topic or domain. * @param events the custom events to publish. * * @return the completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendCustomEvents(Iterable<Object> events) { return withContext(context -> sendCustomEvents(events, context)); } /** * Publishes the given EventGrid events to the set topic or domain and gives the response issued by EventGrid. * @param events the EventGrid events to publish. * * @return the response from the EventGrid service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<EventGridEvent> events) { return withContext(context -> sendEventsWithResponse(events, context)); } Mono<Response<Void>> sendEventsWithResponse(Iterable<EventGridEvent> events, Context context) { return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, context)); } /** * Publishes the given cloud events to the set topic or domain and gives the response issued by EventGrid. * @param events the cloud events to publish. * * @return the response from the EventGrid service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events) { return withContext(context -> sendCloudEventsWithResponse(events, context)); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, context)); } /** * Publishes the given custom events to the set topic or domain and gives the response issued by EventGrid. * @param events the custom events to publish. * * @return the response from the EventGrid service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events) { return withContext(context -> sendCustomEventsWithResponse(events, context)); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, context)); } }
class EventGridPublisherAsyncClient { private final String hostname; private final EventGridPublisherClientImpl impl; private final EventGridServiceVersion serviceVersion; EventGridPublisherAsyncClient(HttpPipeline pipeline, String hostname, SerializerAdapter serializerAdapter, EventGridServiceVersion serviceVersion) { this.impl = new EventGridPublisherClientImplBuilder() .pipeline(pipeline) .serializerAdapter(serializerAdapter) .buildClient(); this.serviceVersion = serviceVersion; this.hostname = hostname; } /** * Get the service version of the Rest API. * @return the Service version of the rest API */ public EventGridServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Publishes the given EventGrid events to the set topic or domain. * @param events the EventGrid events to publish. * * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendEvents(Iterable<EventGridEvent> events) { return withContext(context -> sendEvents(events, context)); } Mono<Void> sendEvents(Iterable<EventGridEvent> events, Context context) { return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsAsync(this.hostname, list, context)); } /** * Publishes the given cloud events to the set topic or domain. * @param events the cloud events to publish. * * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendCloudEvents(Iterable<CloudEvent> events) { return withContext(context -> sendCloudEvents(events, context)); } Mono<Void> sendCloudEvents(Iterable<CloudEvent> events, Context context) { return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsAsync(this.hostname, list, context)); } /** * Publishes the given custom events to the set topic or domain. * @param events the custom events to publish. * * @return the completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendCustomEvents(Iterable<Object> events) { return withContext(context -> sendCustomEvents(events, context)); } /** * Publishes the given EventGrid events to the set topic or domain and gives the response issued by EventGrid. * @param events the EventGrid events to publish. * * @return the response from the EventGrid service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendEventsWithResponse(Iterable<EventGridEvent> events) { return withContext(context -> sendEventsWithResponse(events, context)); } Mono<Response<Void>> sendEventsWithResponse(Iterable<EventGridEvent> events, Context context) { return Flux.fromIterable(events) .map(EventGridEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishEventsWithResponseAsync(this.hostname, list, context)); } /** * Publishes the given cloud events to the set topic or domain and gives the response issued by EventGrid. * @param events the cloud events to publish. * * @return the response from the EventGrid service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events) { return withContext(context -> sendCloudEventsWithResponse(events, context)); } Mono<Response<Void>> sendCloudEventsWithResponse(Iterable<CloudEvent> events, Context context) { return Flux.fromIterable(events) .map(CloudEvent::toImpl) .collectList() .flatMap(list -> this.impl.publishCloudEventEventsWithResponseAsync(this.hostname, list, context)); } /** * Publishes the given custom events to the set topic or domain and gives the response issued by EventGrid. * @param events the custom events to publish. * * @return the response from the EventGrid service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events) { return withContext(context -> sendCustomEventsWithResponse(events, context)); } Mono<Response<Void>> sendCustomEventsWithResponse(Iterable<Object> events, Context context) { return Flux.fromIterable(events) .collectList() .flatMap(list -> this.impl.publishCustomEventEventsWithResponseAsync(this.hostname, list, context)); } }
Per https://azure.github.io/azure-sdk/java_implementation.html#java-errors-system-errors, split the exception into NPE and IllegalArgumentException when the input is null or empty, respectively.
public EventGridSasCredential(String sas) { if (CoreUtils.isNullOrEmpty(sas)) { throw logger.logExceptionAsError(new IllegalArgumentException("the access signature cannot be null or empty")); } this.sas = sas; }
}
public EventGridSasCredential(String sas) { if (sas == null) { throw logger.logExceptionAsError(new IllegalArgumentException("the access signature cannot be null")); } if (sas.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("the access signature cannot be empty")); } this.sas = sas; }
class EventGridSasCredential { private String sas; private static final ClientLogger logger = new ClientLogger(EventGridSasCredential.class); /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link EventGridSasCredential}. */ public static String createSas(String endpoint, OffsetDateTime expirationTime, AzureKeyCredential keyCredential) { try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a")), charset.name()); String unsignedSas = String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance("hmacSHA256"); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), "hmacSHA256")); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw new RuntimeException(logger.logThrowableAsError(e)); } } /** * Create an instance of this object to authenticate calls to the EventGrid service. * @param sas the shared access signature to use. 
*/ /** * Get the token string to authenticate service calls * @return the Shared Access Signature as a string */ public String getSas() { return sas; } /** * Change the shared access signature token to a new one. * @param sas the shared access signature token to use. */ public void update(String sas) { this.sas = sas; } }
class EventGridSasCredential { private String sas; private static final ClientLogger logger = new ClientLogger(EventGridSasCredential.class); /** * Generate a shared access signature to provide time-limited authentication for requests to the Event Grid * service. * @param endpoint the endpoint of the Event Grid topic or domain. * @param expirationTime the time in which the signature should expire, no longer providing authentication. * @param keyCredential the access key obtained from the Event Grid topic or domain. * * @return the shared access signature string which can be used to construct an instance of * {@link EventGridSasCredential}. */ public static String createSas(String endpoint, OffsetDateTime expirationTime, AzureKeyCredential keyCredential) { try { String resKey = "r"; String expKey = "e"; String signKey = "s"; Charset charset = StandardCharsets.UTF_8; String encodedResource = URLEncoder.encode(endpoint, charset.name()); String encodedExpiration = URLEncoder.encode(expirationTime.atZoneSameInstant(ZoneOffset.UTC).format( DateTimeFormatter.ofPattern("M/d/yyyy h:m:s a")), charset.name()); String unsignedSas = String.format("%s=%s&%s=%s", resKey, encodedResource, expKey, encodedExpiration); Mac hmac = Mac.getInstance("hmacSHA256"); hmac.init(new SecretKeySpec(Base64.getDecoder().decode(keyCredential.getKey()), "hmacSHA256")); String signature = new String(Base64.getEncoder().encode( hmac.doFinal(unsignedSas.getBytes(charset))), charset); String encodedSignature = URLEncoder.encode(signature, charset.name()); return String.format("%s&%s=%s", unsignedSas, signKey, encodedSignature); } catch (NoSuchAlgorithmException | UnsupportedEncodingException | InvalidKeyException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Create an instance of this object to authenticate calls to the EventGrid service. * @param sas the shared access signature to use. 
*/ /** * Get the token string to authenticate service calls * @return the Shared Access Signature as a string */ public String getSas() { return sas; } /** * Change the shared access signature token to a new one. * @param sas the shared access signature token to use. */ public void update(String sas) { this.sas = sas; } }
Use string constants.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(String.format("http: } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
redirectUri = new URI(String.format("http:
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(HTTP_LOCALHOST + ":" + port); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. 
* @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw 
logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication 
getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch 
(KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) 
.build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found 
to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = 
applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. 
Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
Do you need to use `fromFuture`? Can it just be: ```java .flatMap(pc -> pc.acquireToken(parameters)); ```
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(String.format("http: } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
.flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters)));
/**
 * Interactively authenticates a user: launches the default browser for login while a short-lived
 * local HTTP listener on the given port receives the authorization redirect. The redirect URL
 * ({@code HTTP_LOCALHOST + ":" + port}) must be registered as a reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port the local redirect listener is bound to
 * @return a Publisher that emits a {@link MsalToken} once the browser flow completes
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    final URI redirectUri;
    try {
        redirectUri = new URI(HTTP_LOCALHOST + ":" + port);
    } catch (URISyntaxException e) {
        return Mono.error(logger.logExceptionAsError(new RuntimeException(e)));
    }
    // Build the interactive-flow parameters from the requested scopes.
    HashSet<String> requestedScopes = new HashSet<>(request.getScopes());
    InteractiveRequestParameters interactiveParameters = InteractiveRequestParameters
        .builder(redirectUri)
        .scopes(requestedScopes)
        .build();
    // The supplier form of fromFuture defers the MSAL call until subscription time.
    return publicClientApplicationAccessor.getValue()
        .flatMap(application -> Mono.fromFuture(() -> application.acquireToken(interactiveParameters)))
        .onErrorMap(error -> new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, error))
        .map(MsalToken::new);
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. 
* @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw 
logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication 
getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch 
(KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) 
.build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found 
to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = 
applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. 
Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
its needed, because we get back a CompletableFuture.
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(String.format("http: } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); }
.flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters)));
/**
 * Asynchronously acquires a token from Azure Active Directory via the system browser. The
 * credential runs a small local HTTP listener on {@code port}; the matching
 * {@code http://localhost:<port>} address must be configured as a reply URL on the application.
 *
 * @param request the details of the token request (scopes to acquire)
 * @param port the localhost port the temporary listener receives the auth redirect on
 * @return a Mono emitting the resulting {@link MsalToken}; errors are surfaced as
 *         {@link ClientAuthenticationException}
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    // Build the localhost redirect target first; an invalid URI aborts the flow immediately.
    final URI localRedirect;
    try {
        localRedirect = new URI(HTTP_LOCALHOST + ":" + port);
    } catch (URISyntaxException e) {
        return Mono.error(logger.logExceptionAsError(new RuntimeException(e)));
    }

    final InteractiveRequestParameters interactiveParams =
        InteractiveRequestParameters.builder(localRedirect)
            .scopes(new HashSet<>(request.getScopes()))
            .build();

    // fromFuture with a Supplier defers the MSAL CompletableFuture until subscription time.
    return publicClientApplicationAccessor.getValue()
        .flatMap(app -> Mono.fromFuture(() -> app.acquireToken(interactiveParams)))
        .onErrorMap(cause -> new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, cause))
        .map(MsalToken::new);
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. 
* @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw 
logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication 
getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch 
(KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) 
.build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found 
to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = connection.getResponseCode(); if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = 
applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } } /** * Asynchronously acquire a token from Active Directory with Azure CLI. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. 
Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. * * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. 
Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
same as above
public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } return this; }
this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name());
public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() == 
null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, 
containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewListenRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> 
manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override 
public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName) .flatMap(aVoid -> context.voidMono()); })); return this; } public CaptureSettings 
withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName) .flatMap(aVoid -> context.voidMono())); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName) .flatMap(aVoid -> context.voidMono()); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if 
(this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Void> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return getBlobClientAsync(storageAccount) .flatMap(blobServiceAsyncClient -> blobServiceAsyncClient .getBlobContainerAsyncClient(containerName) .exists() .flatMap(aBoolean -> { if (aBoolean) { return Mono.empty(); } return blobServiceAsyncClient.getBlobContainerAsyncClient(containerName).create(); })); } private Mono<BlobServiceAsyncClient> getBlobClientAsync(final StorageAccount storageAccount) { return storageAccount.getKeysAsync() .flatMapIterable(storageAccountKeys -> storageAccountKeys) .last() .map(storageAccountKey -> { BlobServiceAsyncClient blobServiceAsyncClient = new BlobServiceClientBuilder() .connectionString(Utils.getStorageConnectionString( storageAccount.name(), storageAccountKey.value(), manager().environment())) .pipeline(manager().httpPipeline()) .buildAsyncClient(); return blobServiceAsyncClient; }); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); 
CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { 
this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { 
concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } 
@Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = 
eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return 
this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) 
.withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
Please try test case which do both (NewSend and NewListen) in one Create/Update. I remember service not able to handle concurrent requests under eventhub, so these had to be done in sequence. https://github.com/Azure/azure-libraries-for-net/issues/891
public EventHubImpl withNewListenRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
}
public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() 
== null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) 
.withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), 
name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) 
indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public 
CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); 
clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void 
beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class 
CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { 
this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) 
.cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
Do they have a `without` method?
public EventHubImpl withNewListenRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
}
public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() 
== null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) 
.withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), 
name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) 
indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public 
CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); 
clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void 
beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class 
CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { 
this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) 
.cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
I will add new method `withNewSendAndListenRule(ruleName)`. There is `withoutAuthorizationRule(ruleName)` to remove the authorization rule no matter what access it has.
public EventHubImpl withNewListenRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
}
public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() 
== null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) 
.withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), 
name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) 
indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public 
CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); 
clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void 
beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class 
CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { 
this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) 
.cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
Sorry, that wasn't my point. What I mean is that these REST requests under "eventhub" might need to be issued sequentially, and that requirement may not be limited to just these two.
/**
 * Registers a post-run dependent that creates a listen-access authorization
 * rule on this event hub once the hub has been provisioned.
 *
 * @param ruleName name of the authorization rule to create
 * @return this event hub definition for chaining
 */
public EventHubImpl withNewListenRule(final String ruleName) {
    addPostRunDependent(ctx -> {
        // Ancestor coordinates are read inside the lambda, when the task actually runs.
        return manager().eventHubAuthorizationRules()
            .define(ruleName)
            .withExistingEventHub(
                ancestor().resourceGroupName(),
                ancestor().ancestor1Name(),
                name())
            .withListenAccess()
            .createAsync()
            .last();
    });
    return this;
}
}
/**
 * Appends a task to the sequential post-run chain that creates a listen-only
 * authorization rule for this event hub after the hub itself is created.
 *
 * @param ruleName name of the authorization rule to create
 * @return this event hub definition for chaining
 */
public EventHubImpl withNewListenRule(final String ruleName) {
    // The reactive chain below is only assembled here; execution is deferred
    // until the concatenated post-run tasks are subscribed.
    concatPostRunTask(
        manager().eventHubAuthorizationRules()
            .define(ruleName)
            .withExistingEventHub(
                ancestor().resourceGroupName(),
                ancestor().ancestor1Name(),
                name())
            .withListenAccess()
            .createAsync()
            .last());
    return this;
}
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() 
== null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) 
.withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), 
name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) 
indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public 
CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); 
clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void 
beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class 
CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { 
this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) 
.cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
After discuss offline, we decide to concat the post run tasks in eventhubs.
public EventHubImpl withNewListenRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
}
public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { if (this.inner().captureDescription() 
== null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewManageRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .define(ruleName) 
.withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { addPostRunDependent(context -> manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .then(context.voidMono())); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { addPostRunDependent(context -> manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { addPostRunDependent(context -> manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .then(context.voidMono())); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), 
name(), this.inner()) .map(innerToFluentMap(this)); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) 
indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public 
CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); 
clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; } @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void 
beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); } @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class 
CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { 
this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) 
.cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
```java postRunTasks = null; return Mono.empty(); ```
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { return Mono.just(true) .map(aBoolean -> { postRunTasks = null; return aBoolean; }).then(); }
}
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; 
} @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private 
void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public 
CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, 
final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
class EventHubImpl extends NestedResourceImpl<EventHub, EventhubInner, EventHubImpl> implements EventHub, EventHub.Definition, EventHub.Update { private Ancestors.OneAncestor ancestor; private CaptureSettings captureSettings; private StorageManager storageManager; private Flux<Indexable> postRunTasks; private final ClientLogger logger = new ClientLogger(EventHubImpl.class); EventHubImpl(String name, EventhubInner inner, EventHubsManager manager, StorageManager storageManager) { super(name, inner, manager); this.ancestor = new Ancestors().new OneAncestor(inner.id()); this.captureSettings = new CaptureSettings(this.inner()); this.storageManager = storageManager; } EventHubImpl(String name, EventHubsManager manager, StorageManager storageManager) { super(name, new EventhubInner(), manager); this.storageManager = storageManager; this.captureSettings = new CaptureSettings(this.inner()); } @Override public String namespaceResourceGroupName() { return this.ancestor().resourceGroupName(); } @Override public String namespaceName() { return this.ancestor().ancestor1Name(); } @Override public boolean isDataCaptureEnabled() { if (this.inner().captureDescription() == null) { return false; } return Utils.toPrimitiveBoolean(this.inner().captureDescription().enabled()); } @Override public int dataCaptureWindowSizeInSeconds() { if (this.inner().captureDescription() == null) { return 0; } return Utils.toPrimitiveInt(this.inner().captureDescription().intervalInSeconds()); } @Override public int dataCaptureWindowSizeInMB() { if (this.inner().captureDescription() == null) { return 0; } int inBytes = Utils.toPrimitiveInt(this.inner().captureDescription().sizeLimitInBytes()); if (inBytes != 0) { return inBytes / (1024 * 1024); } else { return 0; } } @Override public boolean dataCaptureSkipEmptyArchives() { if (this.inner().captureDescription() == null) { return false; } return this.inner().captureDescription().skipEmptyArchives(); } @Override public String dataCaptureFileNameFormat() { 
if (this.inner().captureDescription() == null) { return null; } else if (this.inner().captureDescription().destination() == null) { return null; } else { return this.inner().captureDescription().destination().archiveNameFormat(); } } @Override public Destination captureDestination() { if (this.inner().captureDescription() == null) { return null; } else { return this.inner().captureDescription().destination(); } } @Override public Set<String> partitionIds() { if (this.inner().partitionIds() == null) { return Collections.unmodifiableSet(new HashSet<String>()); } else { return Collections.unmodifiableSet(new HashSet<String>(this.inner().partitionIds())); } } @Override public int messageRetentionPeriodInDays() { return Utils.toPrimitiveInt(this.inner().messageRetentionInDays()); } @Override public EventHubImpl withNewNamespace(Creatable<EventHubNamespace> namespaceCreatable) { this.addDependency(namespaceCreatable); if (namespaceCreatable instanceof EventHubNamespaceImpl) { EventHubNamespaceImpl namespace = ((EventHubNamespaceImpl) namespaceCreatable); this.ancestor = new Ancestors().new OneAncestor(namespace.resourceGroupName(), namespaceCreatable.name()); } else { logger.logExceptionAsError(new IllegalArgumentException("The namespaceCreatable is invalid.")); } return this; } @Override public EventHubImpl withExistingNamespace(EventHubNamespace namespace) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespace.id())); return this; } @Override public EventHubImpl withExistingNamespace(String resourceGroupName, String namespaceName) { this.ancestor = new Ancestors().new OneAncestor(resourceGroupName, namespaceName); return this; } @Override public EventHubImpl withExistingNamespaceId(String namespaceId) { this.ancestor = new Ancestors().new OneAncestor(selfId(namespaceId)); return this; } @Override public EventHubImpl withNewStorageAccountForCapturedData( Creatable<StorageAccount> storageAccountCreatable, String containerName) { 
this.captureSettings.withNewStorageAccountForCapturedData(storageAccountCreatable, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( StorageAccount storageAccount, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccount, containerName); return this; } @Override public EventHubImpl withExistingStorageAccountForCapturedData( String storageAccountId, String containerName) { this.captureSettings.withExistingStorageAccountForCapturedData(storageAccountId, containerName); return this; } @Override public EventHubImpl withDataCaptureEnabled() { this.captureSettings.withDataCaptureEnabled(); return this; } @Override public EventHubImpl withDataCaptureDisabled() { this.captureSettings.withDataCaptureDisabled(); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.captureSettings.withDataCaptureWindowSizeInSeconds(sizeInSeconds); return this; } @Override public EventHubImpl withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.captureSettings.withDataCaptureSkipEmptyArchives(skipEmptyArchives); return this; } @Override public EventHubImpl withDataCaptureWindowSizeInMB(int sizeInMB) { this.captureSettings.withDataCaptureWindowSizeInMB(sizeInMB); return this; } @Override public EventHubImpl withDataCaptureFileNameFormat(String format) { this.captureSettings.withDataCaptureFileNameFormat(format); return this; } @Override public EventHubImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), 
ancestor().ancestor1Name(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewSendAndListenRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withSendAndListenAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .define(ruleName) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().eventHubAuthorizationRules() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .createAsync() .last()); return this; } @Override public EventHubImpl withNewConsumerGroup(final String name, final String metadata) { concatPostRunTask(manager().consumerGroups() .define(name) .withExistingEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()) .withUserMetadata(metadata) .createAsync() .last()); return this; } @Override public EventHubImpl withoutConsumerGroup(final String name) { concatPostRunTask(manager().consumerGroups() .deleteByNameAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), name) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubImpl withPartitionCount(long count) { this.inner().withPartitionCount(count); return this; 
} @Override public EventHubImpl withRetentionPeriodInDays(long period) { this.inner().withMessageRetentionInDays(period); return this; } @Override public EventHubImpl update() { this.captureSettings = new CaptureSettings(this.inner()); return super.update(); } @Override public void beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } this.inner().withCaptureDescription(this.captureSettings.validateAndGetSettings()); } @Override public Mono<EventHub> createResourceAsync() { return this.manager.inner().getEventHubs() .createOrUpdateAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override @Override protected Mono<EventhubInner> getInnerAsync() { return this.manager.inner().getEventHubs().getAsync(this.ancestor().resourceGroupName(), this.ancestor().ancestor1Name(), this.name()); } @Override public PagedFlux<EventHubConsumerGroup> listConsumerGroupsAsync() { return this.manager.consumerGroups() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedFlux<EventHubAuthorizationRule> listAuthorizationRulesAsync() { return this.manager.eventHubAuthorizationRules() .listByEventHubAsync(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubConsumerGroup> listConsumerGroups() { return this.manager.consumerGroups() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } @Override public PagedIterable<EventHubAuthorizationRule> listAuthorizationRules() { return this.manager.eventHubAuthorizationRules() .listByEventHub(ancestor().resourceGroupName(), ancestor().ancestor1Name(), name()); } private Ancestors.OneAncestor ancestor() { Objects.requireNonNull(this.ancestor); return this.ancestor; } private String selfId(String parentId) { return String.format("%s/eventhubs/%s", parentId, this.name()); } private 
void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } private class CaptureSettings { private final CaptureDescription currentSettings; private CaptureDescription newSettings; CaptureSettings(final EventhubInner eventhubInner) { this.currentSettings = eventhubInner.captureDescription(); } public CaptureSettings withNewStorageAccountForCapturedData( final Creatable<StorageAccount> creatableStorageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId("temp-id"); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> creatableStorageAccount .createAsync() .last() .flatMap(indexable -> { StorageAccount storageAccount = (StorageAccount) indexable; ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final StorageAccount storageAccount, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> createContainerIfNotExistsAsync(storageAccount, containerName)); return this; } public CaptureSettings withExistingStorageAccountForCapturedData( final String storageAccountId, final String containerName) { this.ensureSettings().destination().withStorageAccountResourceId(storageAccountId); this.ensureSettings().destination().withBlobContainer(containerName); addDependency(context -> storageManager.storageAccounts() .getByIdAsync(storageAccountId) .flatMap(storageAccount -> { ensureSettings().destination().withStorageAccountResourceId(storageAccount.id()); return createContainerIfNotExistsAsync(storageAccount, containerName); })); return this; } public 
CaptureSettings withDataCaptureEnabled() { this.ensureSettings().withEnabled(true); return this; } public CaptureSettings withDataCaptureDisabled() { this.ensureSettings().withEnabled(false); return this; } public CaptureSettings withDataCaptureSkipEmptyArchives(Boolean skipEmptyArchives) { this.ensureSettings().withSkipEmptyArchives(skipEmptyArchives); return this; } public CaptureSettings withDataCaptureWindowSizeInSeconds(int sizeInSeconds) { this.ensureSettings().withIntervalInSeconds(sizeInSeconds); return this; } public CaptureSettings withDataCaptureWindowSizeInMB(int sizeInMB) { this.ensureSettings().withSizeLimitInBytes(sizeInMB * 1024 * 1024); return this; } public CaptureSettings withDataCaptureFileNameFormat(String format) { this.ensureSettings().destination().withArchiveNameFormat(format); return this; } public CaptureDescription validateAndGetSettings() { if (this.newSettings == null) { return this.currentSettings; } else if (this.newSettings.destination() == null || this.newSettings.destination().storageAccountResourceId() == null || this.newSettings.destination().blobContainer() == null) { throw logger.logExceptionAsError(new IllegalStateException( "Setting any of the capture properties requires " + "capture destination [StorageAccount, DataLake] to be specified")); } if (this.newSettings.destination().name() == null) { this.newSettings.destination().withName("EventHubArchive.AzureBlockBlob"); } if (this.newSettings.encoding() == null) { this.newSettings.withEncoding(EncodingCaptureDescription.AVRO); } return this.newSettings; } private CaptureDescription ensureSettings() { if (this.newSettings != null) { return this.newSettings; } else if (this.currentSettings == null) { this.newSettings = new CaptureDescription().withDestination(new Destination()); return this.newSettings; } else { this.newSettings = cloneCurrentSettings(); return this.newSettings; } } private Mono<Indexable> createContainerIfNotExistsAsync(final StorageAccount storageAccount, 
final String containerName) { return storageManager.blobContainers() .getAsync(storageAccount.resourceGroupName(), storageAccount.name(), containerName) .cast(Indexable.class) .onErrorResume(throwable -> storageManager.blobContainers() .defineContainer(containerName) .withExistingBlobService(storageAccount.resourceGroupName(), storageAccount.name()) .withPublicAccess(PublicAccess.CONTAINER) .createAsync() .last()); } private CaptureDescription cloneCurrentSettings() { Objects.requireNonNull(this.currentSettings); CaptureDescription clone = new CaptureDescription(); clone.withSizeLimitInBytes(this.currentSettings.sizeLimitInBytes()); clone.withSkipEmptyArchives(this.currentSettings.skipEmptyArchives()); clone.withIntervalInSeconds(this.currentSettings.intervalInSeconds()); clone.withEnabled(this.currentSettings.enabled()); clone.withEncoding(this.currentSettings.encoding()); if (this.currentSettings.destination() != null) { clone.withDestination(new Destination()); clone.destination().withArchiveNameFormat(this.currentSettings.destination().archiveNameFormat()); clone.destination().withBlobContainer(this.currentSettings.destination().blobContainer()); clone.destination().withName(this.currentSettings.destination().name()); clone.destination().withStorageAccountResourceId( this.currentSettings.destination().storageAccountResourceId()); } else { clone.withDestination(new Destination()); } return clone; } } }
Same as above.
/**
 * Hook invoked after all post-run dependent tasks of the create/update group
 * have completed; drops the accumulated task chain so a subsequent update
 * starts with a clean set of post-run tasks.
 *
 * @param isGroupFaulted whether any task in the group failed (unused — the
 *        cleanup must happen either way)
 * @return an already-completed {@code Mono}; the cleanup itself is synchronous
 */
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
    // The original Mono.just(true).map(...).then() chain existed only to run a
    // side effect and also deferred the reset until subscription. Clearing the
    // field directly and returning Mono.empty() is equivalent for callers and
    // far simpler.
    postRunTasks = null;
    return Mono.empty();
}
}).then();
/**
 * Hook invoked after all post-run dependent tasks of the create/update group
 * have completed; drops the accumulated task chain so a subsequent update
 * starts with a clean set of post-run tasks.
 *
 * @param isGroupFaulted whether any task in the group failed (unused — the
 *        cleanup must happen either way)
 * @return an already-completed {@code Mono}; the cleanup itself is synchronous
 */
public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { postRunTasks = null; return Mono.empty(); }
class EventHubNamespaceImpl extends GroupableResourceImpl<EventHubNamespace, EHNamespaceInner, EventHubNamespaceImpl, EventHubsManager> implements EventHubNamespace, EventHubNamespace.Definition, EventHubNamespace.Update { private Flux<Indexable> postRunTasks; protected EventHubNamespaceImpl(String name, EHNamespaceInner innerObject, EventHubsManager manager) { super(name, innerObject, manager); } @Override public EventHubNamespaceSkuType sku() { return new EventHubNamespaceSkuType(this.inner().sku()); } @Override public String azureInsightMetricId() { return this.inner().metricId(); } @Override public String serviceBusEndpoint() { return this.inner().serviceBusEndpoint(); } @Override public OffsetDateTime createdAt() { return this.inner().createdAt(); } @Override public OffsetDateTime updatedAt() { return this.inner().updatedAt(); } @Override public String provisioningState() { return this.inner().provisioningState(); } @Override public boolean isAutoScaleEnabled() { return Utils.toPrimitiveBoolean(this.inner().isAutoInflateEnabled()); } @Override public int currentThroughputUnits() { return Utils.toPrimitiveInt(this.inner().sku().capacity()); } @Override public int throughputUnitsUpperLimit() { return Utils.toPrimitiveInt(this.inner().maximumThroughputUnits()); } @Override public EventHubNamespaceImpl withNewEventHub(final String eventHubName) { concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewEventHub(final String eventHubName, final int partitionCount) { concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .withPartitionCount(partitionCount) .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewEventHub( final String eventHubName, final int partitionCount, final int retentionPeriodInDays) { 
concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .withPartitionCount(partitionCount) .withRetentionPeriodInDays(retentionPeriodInDays) .createAsync() .last()); return this; } @Override public Update withoutEventHub(final String eventHubName) { concatPostRunTask(manager().eventHubs() .deleteByNameAsync(resourceGroupName(), name(), eventHubName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubNamespaceImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .deleteByNameAsync(resourceGroupName(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubNamespaceImpl withAutoScaling() { this.setDefaultSkuIfNotSet(); this.inner().withIsAutoInflateEnabled(true); if (this.inner().maximumThroughputUnits() == null) { this.withThroughputUnitsUpperLimit(20); } return this; } @Override public EventHubNamespaceImpl withSku(EventHubNamespaceSkuType namespaceSku) { Sku newSkuInner = new Sku() .withName(namespaceSku.name()) 
.withTier(namespaceSku.tier()) .withCapacity(null); Sku currentSkuInner = this.inner().sku(); boolean isDifferent = currentSkuInner == null || !currentSkuInner.name().equals(newSkuInner.name()); if (isDifferent) { this.inner().withSku(newSkuInner); if (newSkuInner.name().equals(SkuName.STANDARD)) { newSkuInner.withCapacity(1); } } return this; } @Override public EventHubNamespaceImpl withCurrentThroughputUnits(int units) { this.setDefaultSkuIfNotSet(); this.inner().sku().withCapacity(units); return this; } @Override public EventHubNamespaceImpl withThroughputUnitsUpperLimit(int units) { this.inner().withMaximumThroughputUnits(units); return this; } @Override public void beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } } @Override public Mono<EventHubNamespace> createResourceAsync() { return this.manager().inner().getNamespaces() .createOrUpdateAsync(resourceGroupName(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override @Override public PagedFlux<EventHub> listEventHubsAsync() { return this.manager().eventHubs().listByNamespaceAsync(resourceGroupName(), name()); } @Override public PagedFlux<EventHubNamespaceAuthorizationRule> listAuthorizationRulesAsync() { return this.manager().namespaceAuthorizationRules() .listByNamespaceAsync(this.resourceGroupName(), this.name()); } @Override public PagedIterable<EventHub> listEventHubs() { return this.manager().eventHubs().listByNamespace(resourceGroupName(), name()); } @Override public PagedIterable<EventHubNamespaceAuthorizationRule> listAuthorizationRules() { return this.manager().namespaceAuthorizationRules() .listByNamespace(this.resourceGroupName(), this.name()); } @Override protected Mono<EHNamespaceInner> getInnerAsync() { return this.manager().inner().getNamespaces().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private void setDefaultSkuIfNotSet() { if (this.inner().sku() == null) { 
this.withSku(EventHubNamespaceSkuType.STANDARD); } } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } }
class EventHubNamespaceImpl extends GroupableResourceImpl<EventHubNamespace, EHNamespaceInner, EventHubNamespaceImpl, EventHubsManager> implements EventHubNamespace, EventHubNamespace.Definition, EventHubNamespace.Update { private Flux<Indexable> postRunTasks; protected EventHubNamespaceImpl(String name, EHNamespaceInner innerObject, EventHubsManager manager) { super(name, innerObject, manager); } @Override public EventHubNamespaceSkuType sku() { return new EventHubNamespaceSkuType(this.inner().sku()); } @Override public String azureInsightMetricId() { return this.inner().metricId(); } @Override public String serviceBusEndpoint() { return this.inner().serviceBusEndpoint(); } @Override public OffsetDateTime createdAt() { return this.inner().createdAt(); } @Override public OffsetDateTime updatedAt() { return this.inner().updatedAt(); } @Override public String provisioningState() { return this.inner().provisioningState(); } @Override public boolean isAutoScaleEnabled() { return Utils.toPrimitiveBoolean(this.inner().isAutoInflateEnabled()); } @Override public int currentThroughputUnits() { return Utils.toPrimitiveInt(this.inner().sku().capacity()); } @Override public int throughputUnitsUpperLimit() { return Utils.toPrimitiveInt(this.inner().maximumThroughputUnits()); } @Override public EventHubNamespaceImpl withNewEventHub(final String eventHubName) { concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewEventHub(final String eventHubName, final int partitionCount) { concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .withPartitionCount(partitionCount) .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewEventHub( final String eventHubName, final int partitionCount, final int retentionPeriodInDays) { 
concatPostRunTask(manager().eventHubs() .define(eventHubName) .withExistingNamespace(resourceGroupName(), name()) .withPartitionCount(partitionCount) .withRetentionPeriodInDays(retentionPeriodInDays) .createAsync() .last()); return this; } @Override public Update withoutEventHub(final String eventHubName) { concatPostRunTask(manager().eventHubs() .deleteByNameAsync(resourceGroupName(), name(), eventHubName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubNamespaceImpl withNewSendRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withSendAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewListenRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withListenAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withNewManageRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .define(ruleName) .withExistingNamespace(resourceGroupName(), name()) .withManageAccess() .createAsync() .last()); return this; } @Override public EventHubNamespaceImpl withoutAuthorizationRule(final String ruleName) { concatPostRunTask(manager().namespaceAuthorizationRules() .deleteByNameAsync(resourceGroupName(), name(), ruleName) .map(aVoid -> new VoidIndexable(UUID.randomUUID().toString()))); return this; } @Override public EventHubNamespaceImpl withAutoScaling() { this.setDefaultSkuIfNotSet(); this.inner().withIsAutoInflateEnabled(true); if (this.inner().maximumThroughputUnits() == null) { this.withThroughputUnitsUpperLimit(20); } return this; } @Override public EventHubNamespaceImpl withSku(EventHubNamespaceSkuType namespaceSku) { Sku newSkuInner = new Sku() .withName(namespaceSku.name()) 
.withTier(namespaceSku.tier()) .withCapacity(null); Sku currentSkuInner = this.inner().sku(); boolean isDifferent = currentSkuInner == null || !currentSkuInner.name().equals(newSkuInner.name()); if (isDifferent) { this.inner().withSku(newSkuInner); if (newSkuInner.name().equals(SkuName.STANDARD)) { newSkuInner.withCapacity(1); } } return this; } @Override public EventHubNamespaceImpl withCurrentThroughputUnits(int units) { this.setDefaultSkuIfNotSet(); this.inner().sku().withCapacity(units); return this; } @Override public EventHubNamespaceImpl withThroughputUnitsUpperLimit(int units) { this.inner().withMaximumThroughputUnits(units); return this; } @Override public void beforeGroupCreateOrUpdate() { if (postRunTasks != null) { addPostRunDependent(context -> postRunTasks.last()); } } @Override public Mono<EventHubNamespace> createResourceAsync() { return this.manager().inner().getNamespaces() .createOrUpdateAsync(resourceGroupName(), name(), this.inner()) .map(innerToFluentMap(this)); } @Override @Override public PagedFlux<EventHub> listEventHubsAsync() { return this.manager().eventHubs().listByNamespaceAsync(resourceGroupName(), name()); } @Override public PagedFlux<EventHubNamespaceAuthorizationRule> listAuthorizationRulesAsync() { return this.manager().namespaceAuthorizationRules() .listByNamespaceAsync(this.resourceGroupName(), this.name()); } @Override public PagedIterable<EventHub> listEventHubs() { return this.manager().eventHubs().listByNamespace(resourceGroupName(), name()); } @Override public PagedIterable<EventHubNamespaceAuthorizationRule> listAuthorizationRules() { return this.manager().namespaceAuthorizationRules() .listByNamespace(this.resourceGroupName(), this.name()); } @Override protected Mono<EHNamespaceInner> getInnerAsync() { return this.manager().inner().getNamespaces().getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private void setDefaultSkuIfNotSet() { if (this.inner().sku() == null) { 
this.withSku(EventHubNamespaceSkuType.STANDARD); } } private void concatPostRunTask(Mono<Indexable> task) { if (postRunTasks == null) { postRunTasks = Flux.empty(); } postRunTasks = postRunTasks.concatWith(task); } }
Should this be added to the map as well?
/**
 * Creates a region from a name and a label, returning the cached instance when one
 * already exists for the (case-insensitive) name.
 * <p>
 * Note: the private constructor registers every new instance in {@code VALUES_BY_NAME},
 * so a freshly created region is immediately discoverable by later lookups.
 *
 * @param name the uniquely identifiable name of the region
 * @param label the label of the region
 * @return the existing region for {@code name}, or a newly created one
 */
public static Region create(String name, String label) {
    Objects.requireNonNull(name, "'name' cannot be null.");
    final Region existing = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT));
    return existing != null ? existing : new Region(name, label);
}
return new Region(name, label);
/**
 * Returns the region registered under the given name, creating (and thereby
 * registering) a new one when no match exists.
 *
 * @param name the uniquely identifiable name of the region
 * @param label the label of the region
 * @return the cached or newly created region
 */
public static Region create(String name, String label) {
    Objects.requireNonNull(name, "'name' cannot be null.");
    final String key = name.toLowerCase(Locale.ROOT);
    final Region cached = VALUES_BY_NAME.get(key);
    if (cached == null) {
        // The constructor adds the new instance to VALUES_BY_NAME itself.
        return new Region(name, label);
    }
    return cached;
}
class Region { private static final ConcurrentMap<String, Region> VALUES_BY_NAME = new ConcurrentHashMap<>(); /* * Azure Cloud - Americas */ /** * East US (US) (recommended) */ public static final Region US_EAST = new Region("eastus", "East US"); /** * East US 2 (US) (recommended) */ public static final Region US_EAST2 = new Region("eastus2", "East US 2"); /** * South Central US (US) (recommended) */ public static final Region US_SOUTH_CENTRAL = new Region("southcentralus", "South Central US"); /** * West US 2 (US) (recommended) */ public static final Region US_WEST2 = new Region("westus2", "West US 2"); /** * Central US (US) (recommended) */ public static final Region US_CENTRAL = new Region("centralus", "Central US"); /** * North Central US (US) (recommended) */ public static final Region US_NORTH_CENTRAL = new Region("northcentralus", "North Central US"); /** * West US (US) (recommended) */ public static final Region US_WEST = new Region("westus", "West US"); /** * West Central US (US) */ public static final Region US_WEST_CENTRAL = new Region("westcentralus", "West Central US"); /** * Canada Central (Canada) (recommended) */ public static final Region CANADA_CENTRAL = new Region("canadacentral", "Canada Central"); /** * Canada East (Canada) */ public static final Region CANADA_EAST = new Region("canadaeast", "Canada East"); /** * Brazil South (South America) (recommended) */ public static final Region BRAZIL_SOUTH = new Region("brazilsouth", "Brazil South"); /** * Brazil Southeast (South America) */ public static final Region BRAZIL_SOUTHEAST = new Region("brazilsoutheast", "Brazil Southeast"); /* * Azure Cloud - Europe */ /** * North Europe (Europe) (recommended) */ public static final Region EUROPE_NORTH = new Region("northeurope", "North Europe"); /** * UK South (Europe) (recommended) */ public static final Region UK_SOUTH = new Region("uksouth", "UK South"); /** * West Europe (Europe) (recommended) */ public static final Region EUROPE_WEST = new 
Region("westeurope", "West Europe"); /** * France Central (Europe) (recommended) */ public static final Region FRANCE_CENTRAL = new Region("francecentral", "France Central"); /** * Germany West Central (Europe) (recommended) */ public static final Region GERMANY_WEST_CENTRAL = new Region("germanywestcentral", "Germany West Central"); /** * Norway East (Europe) (recommended) */ public static final Region NORWAY_EAST = new Region("norwayeast", "Norway East"); /** * Switzerland North (Europe) (recommended) */ public static final Region SWITZERLAND_NORTH = new Region("switzerlandnorth", "Switzerland North"); /** * France South (Europe) */ public static final Region FRANCE_SOUTH = new Region("francesouth", "France South"); /** * Germany North (Europe) */ public static final Region GERMANY_NORTH = new Region("germanynorth", "Germany North"); /** * Norway West (Europe) */ public static final Region NORWAY_WEST = new Region("norwaywest", "Norway West"); /** * Switzerland West (Europe) */ public static final Region SWITZERLAND_WEST = new Region("switzerlandwest", "Switzerland West"); /** * UK West (Europe) */ public static final Region UK_WEST = new Region("ukwest", "UK West"); /* * Azure Cloud - Asia */ /** * Australia East (Asia Pacific) (recommended) */ public static final Region AUSTRALIA_EAST = new Region("australiaeast", "Australia East"); /** * Southeast Asia (Asia Pacific) (recommended) */ public static final Region ASIA_SOUTHEAST = new Region("southeastasia", "Southeast Asia"); /** * Central India (Asia Pacific) (recommended) */ public static final Region INDIA_CENTRAL = new Region("centralindia", "Central India"); /** * East Asia (Asia Pacific) (recommended) */ public static final Region ASIA_EAST = new Region("eastasia", "East Asia"); /** * Japan East (Asia Pacific) (recommended) */ public static final Region JAPAN_EAST = new Region("japaneast", "Japan East"); /** * Korea Central (Asia Pacific) (recommended) */ public static final Region KOREA_CENTRAL = new 
Region("koreacentral", "Korea Central"); /** * Australia Central (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL = new Region("australiacentral", "Australia Central"); /** * Australia Central 2 (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL2 = new Region("australiacentral2", "Australia Central 2"); /** * Australia Southeast (Asia Pacific) */ public static final Region AUSTRALIA_SOUTHEAST = new Region("australiasoutheast", "Australia Southeast"); /** * Japan West (Asia Pacific) */ public static final Region JAPAN_WEST = new Region("japanwest", "Japan West"); /** * Korea South (Asia Pacific) */ public static final Region KOREA_SOUTH = new Region("koreasouth", "Korea South"); /** * South India (Asia Pacific) */ public static final Region INDIA_SOUTH = new Region("southindia", "South India"); /** * West India (Asia Pacific) */ public static final Region INDIA_WEST = new Region("westindia", "West India"); /* * Azure Cloud - Middle East and Africa */ /** * UAE North (Middle East) (recommended) */ public static final Region UAE_NORTH = new Region("uaenorth", "UAE North"); /** * UAE Central (Middle East) */ public static final Region UAE_CENTRAL = new Region("uaecentral", "UAE Central"); /** * South Africa North (Africa) (recommended) */ public static final Region SOUTHAFRICA_NORTH = new Region("southafricanorth", "South Africa North"); /** * South Africa West (Africa) */ public static final Region SOUTHAFRICA_WEST = new Region("southafricawest", "South Africa West"); /* * Azure China Cloud */ /** * China North */ public static final Region CHINA_NORTH = new Region("chinanorth", "China North"); /** * China East */ public static final Region CHINA_EAST = new Region("chinaeast", "China East"); /** * China North 2 */ public static final Region CHINA_NORTH2 = new Region("chinanorth2", "China North 2"); /** * China East 2 */ public static final Region CHINA_EAST2 = new Region("chinaeast2", "China East 2"); /* * Azure German Cloud */ /** * Germany 
Central */ public static final Region GERMANY_CENTRAL = new Region("germanycentral", "Germany Central"); /** * Germany Northeast */ public static final Region GERMANY_NORTHEAST = new Region("germanynortheast", "Germany Northeast"); /* * Azure Government Cloud */ /** * U.S. government cloud in Virginia. */ public static final Region GOV_US_VIRGINIA = new Region("usgovvirginia", "US Gov Virginia"); /** * U.S. government cloud in Iowa. */ public static final Region GOV_US_IOWA = new Region("usgoviowa", "US Gov Iowa"); /** * U.S. government cloud in Arizona. */ public static final Region GOV_US_ARIZONA = new Region("usgovarizona", "US Gov Arizona"); /** * U.S. government cloud in Texas. */ public static final Region GOV_US_TEXAS = new Region("usgovtexas", "US Gov Texas"); /** * U.S. Department of Defense cloud - East. */ public static final Region GOV_US_DOD_EAST = new Region("usdodeast", "US DoD East"); /** * U.S. Department of Defense cloud - Central. */ public static final Region GOV_US_DOD_CENTRAL = new Region("usdodcentral", "US DoD Central"); private final String name; private final String label; /** * @return predefined Azure regions */ public static Region[] values() { Collection<Region> valuesCollection = VALUES_BY_NAME.values(); return valuesCollection.toArray(new Region[valuesCollection.size()]); } private Region(String name, String label) { this.name = name; this.label = label; VALUES_BY_NAME.put(name.toLowerCase(Locale.ROOT), this); } /** * Creates a region from a name and a label. 
* * @param name the uniquely identifiable name of the region * @param label the label of the region * @return the newly created region */ @JsonValue @Override public String toString() { return name(); } /** * @return the name of the region */ public String name() { return this.name; } /** * @return the label of the region */ public String label() { return this.label; } /** * Parses a name into a Region object and creates a new Region instance if not found among the existing ones. * * @param name the name of the region * @return the parsed or created region */ public static Region fromName(String name) { if (name == null) { return null; } Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT).replace(" ", "")); if (region != null) { return region; } else { return Region.create(name.toLowerCase(Locale.ROOT).replace(" ", ""), name); } } @Override public int hashCode() { return this.name.hashCode(); } @Override public boolean equals(Object obj) { if (!(obj instanceof Region)) { return false; } else if (obj == this) { return true; } else { Region rhs = (Region) obj; return this.name.equalsIgnoreCase(rhs.name); } } }
class Region { private static final ConcurrentMap<String, Region> VALUES_BY_NAME = new ConcurrentHashMap<>(); /* * Azure Cloud - Americas */ /** * East US (US) (recommended) */ public static final Region US_EAST = new Region("eastus", "East US"); /** * East US 2 (US) (recommended) */ public static final Region US_EAST2 = new Region("eastus2", "East US 2"); /** * South Central US (US) (recommended) */ public static final Region US_SOUTH_CENTRAL = new Region("southcentralus", "South Central US"); /** * West US 2 (US) (recommended) */ public static final Region US_WEST2 = new Region("westus2", "West US 2"); /** * Central US (US) (recommended) */ public static final Region US_CENTRAL = new Region("centralus", "Central US"); /** * North Central US (US) (recommended) */ public static final Region US_NORTH_CENTRAL = new Region("northcentralus", "North Central US"); /** * West US (US) (recommended) */ public static final Region US_WEST = new Region("westus", "West US"); /** * West Central US (US) */ public static final Region US_WEST_CENTRAL = new Region("westcentralus", "West Central US"); /** * Canada Central (Canada) (recommended) */ public static final Region CANADA_CENTRAL = new Region("canadacentral", "Canada Central"); /** * Canada East (Canada) */ public static final Region CANADA_EAST = new Region("canadaeast", "Canada East"); /** * Brazil South (South America) (recommended) */ public static final Region BRAZIL_SOUTH = new Region("brazilsouth", "Brazil South"); /** * Brazil Southeast (South America) */ public static final Region BRAZIL_SOUTHEAST = new Region("brazilsoutheast", "Brazil Southeast"); /* * Azure Cloud - Europe */ /** * North Europe (Europe) (recommended) */ public static final Region EUROPE_NORTH = new Region("northeurope", "North Europe"); /** * UK South (Europe) (recommended) */ public static final Region UK_SOUTH = new Region("uksouth", "UK South"); /** * West Europe (Europe) (recommended) */ public static final Region EUROPE_WEST = new 
Region("westeurope", "West Europe"); /** * France Central (Europe) (recommended) */ public static final Region FRANCE_CENTRAL = new Region("francecentral", "France Central"); /** * Germany West Central (Europe) (recommended) */ public static final Region GERMANY_WEST_CENTRAL = new Region("germanywestcentral", "Germany West Central"); /** * Norway East (Europe) (recommended) */ public static final Region NORWAY_EAST = new Region("norwayeast", "Norway East"); /** * Switzerland North (Europe) (recommended) */ public static final Region SWITZERLAND_NORTH = new Region("switzerlandnorth", "Switzerland North"); /** * France South (Europe) */ public static final Region FRANCE_SOUTH = new Region("francesouth", "France South"); /** * Germany North (Europe) */ public static final Region GERMANY_NORTH = new Region("germanynorth", "Germany North"); /** * Norway West (Europe) */ public static final Region NORWAY_WEST = new Region("norwaywest", "Norway West"); /** * Switzerland West (Europe) */ public static final Region SWITZERLAND_WEST = new Region("switzerlandwest", "Switzerland West"); /** * UK West (Europe) */ public static final Region UK_WEST = new Region("ukwest", "UK West"); /* * Azure Cloud - Asia */ /** * Australia East (Asia Pacific) (recommended) */ public static final Region AUSTRALIA_EAST = new Region("australiaeast", "Australia East"); /** * Southeast Asia (Asia Pacific) (recommended) */ public static final Region ASIA_SOUTHEAST = new Region("southeastasia", "Southeast Asia"); /** * Central India (Asia Pacific) (recommended) */ public static final Region INDIA_CENTRAL = new Region("centralindia", "Central India"); /** * East Asia (Asia Pacific) (recommended) */ public static final Region ASIA_EAST = new Region("eastasia", "East Asia"); /** * Japan East (Asia Pacific) (recommended) */ public static final Region JAPAN_EAST = new Region("japaneast", "Japan East"); /** * Korea Central (Asia Pacific) (recommended) */ public static final Region KOREA_CENTRAL = new 
Region("koreacentral", "Korea Central"); /** * Australia Central (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL = new Region("australiacentral", "Australia Central"); /** * Australia Central 2 (Asia Pacific) */ public static final Region AUSTRALIA_CENTRAL2 = new Region("australiacentral2", "Australia Central 2"); /** * Australia Southeast (Asia Pacific) */ public static final Region AUSTRALIA_SOUTHEAST = new Region("australiasoutheast", "Australia Southeast"); /** * Japan West (Asia Pacific) */ public static final Region JAPAN_WEST = new Region("japanwest", "Japan West"); /** * Korea South (Asia Pacific) */ public static final Region KOREA_SOUTH = new Region("koreasouth", "Korea South"); /** * South India (Asia Pacific) */ public static final Region INDIA_SOUTH = new Region("southindia", "South India"); /** * West India (Asia Pacific) */ public static final Region INDIA_WEST = new Region("westindia", "West India"); /* * Azure Cloud - Middle East and Africa */ /** * UAE North (Middle East) (recommended) */ public static final Region UAE_NORTH = new Region("uaenorth", "UAE North"); /** * UAE Central (Middle East) */ public static final Region UAE_CENTRAL = new Region("uaecentral", "UAE Central"); /** * South Africa North (Africa) (recommended) */ public static final Region SOUTHAFRICA_NORTH = new Region("southafricanorth", "South Africa North"); /** * South Africa West (Africa) */ public static final Region SOUTHAFRICA_WEST = new Region("southafricawest", "South Africa West"); /* * Azure China Cloud */ /** * China North */ public static final Region CHINA_NORTH = new Region("chinanorth", "China North"); /** * China East */ public static final Region CHINA_EAST = new Region("chinaeast", "China East"); /** * China North 2 */ public static final Region CHINA_NORTH2 = new Region("chinanorth2", "China North 2"); /** * China East 2 */ public static final Region CHINA_EAST2 = new Region("chinaeast2", "China East 2"); /* * Azure German Cloud */ /** * Germany 
Central */ public static final Region GERMANY_CENTRAL = new Region("germanycentral", "Germany Central"); /** * Germany Northeast */ public static final Region GERMANY_NORTHEAST = new Region("germanynortheast", "Germany Northeast"); /* * Azure Government Cloud */ /** * U.S. government cloud in Virginia. */ public static final Region GOV_US_VIRGINIA = new Region("usgovvirginia", "US Gov Virginia"); /** * U.S. government cloud in Iowa. */ public static final Region GOV_US_IOWA = new Region("usgoviowa", "US Gov Iowa"); /** * U.S. government cloud in Arizona. */ public static final Region GOV_US_ARIZONA = new Region("usgovarizona", "US Gov Arizona"); /** * U.S. government cloud in Texas. */ public static final Region GOV_US_TEXAS = new Region("usgovtexas", "US Gov Texas"); /** * U.S. Department of Defense cloud - East. */ public static final Region GOV_US_DOD_EAST = new Region("usdodeast", "US DoD East"); /** * U.S. Department of Defense cloud - Central. */ public static final Region GOV_US_DOD_CENTRAL = new Region("usdodcentral", "US DoD Central"); private final String name; private final String label; /** * @return predefined Azure regions. */ public static Collection<Region> values() { return VALUES_BY_NAME.values(); } private Region(String name, String label) { this.name = name; this.label = label; VALUES_BY_NAME.put(name.toLowerCase(Locale.ROOT), this); } /** * Creates a region from a name and a label. * * @param name the uniquely identifiable name of the region. * @param label the label of the region. * @return the newly created region. */ @JsonValue @Override public String toString() { return name(); } /** * @return the name of the region. */ public String name() { return this.name; } /** * @return the label of the region. */ public String label() { return this.label; } /** * Parses a name into a Region object and creates a new Region instance if not found among the existing ones. * * @param name the name of the region. * @return the parsed or created region. 
*/ public static Region fromName(String name) { if (name == null) { return null; } Region region = VALUES_BY_NAME.get(name.toLowerCase(Locale.ROOT).replace(" ", "")); if (region != null) { return region; } else { return Region.create(name.toLowerCase(Locale.ROOT).replace(" ", ""), name); } } @Override public int hashCode() { return this.name.hashCode(); } @Override public boolean equals(Object obj) { if (!(obj instanceof Region)) { return false; } else if (obj == this) { return true; } else { Region rhs = (Region) obj; return this.name.equalsIgnoreCase(rhs.name); } } }
When we get to the samples code, we will need to read this value from disk instead of hard-coding it.
/**
 * Entry point for the async Digital Twins sample.
 * <p>
 * Flow: create a source twin from a raw JSON string and another from a strongly typed
 * {@code CustomDigitalTwin}, wait for both creations, create three target twins and three
 * relationships from the string-based source twin, patch one relationship, read another back
 * to capture its etag, then delete two relationships (one with an If-Match precondition).
 *
 * @param args Unused command line arguments.
 * @throws InterruptedException If the wait on the twin-creation semaphore is interrupted.
 * @throws JsonProcessingException If twin/relationship JSON (de)serialization fails.
 */
public static void main(String[] args) throws InterruptedException, JsonProcessingException {
    // Service connection settings are read from environment variables.
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String modelId = "dtmi:samples:Building;1";

    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsAsyncClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .httpLogOptions(
            new HttpLogOptions()
                .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Released once per completed subscription below; main() blocks on it before proceeding.
    final Semaphore createTwinsSemaphore = new Semaphore(0);

    // Source twin #1: built as a BasicDigitalTwin, then serialized to a raw JSON string.
    DigitalTwinMetadata metadata = new DigitalTwinMetadata().setModelId(modelId);
    String dtId_String = "dt_String_" + random.nextInt();
    BasicDigitalTwin basicDigitalTwin = new BasicDigitalTwin()
        .setId(dtId_String)
        .setMetadata(metadata)
        .setCustomProperties("AverageTemperature", random.nextInt(50))
        .setCustomProperties("TemperatureUnit", "Celsius");
    String dt_String = mapper.writeValueAsString(basicDigitalTwin);

    Mono<DigitalTwinsResponse<String>> sourceTwinWithResponseString = client.createDigitalTwinWithResponse(dtId_String, dt_String);
    sourceTwinWithResponseString.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_String, result.getStatusCode(), result.getDeserializedHeaders().getETag()));
            try {
                String jsonResponse = result.getValue();
                // Deserialize generically first; when the model id matches the sample's
                // Building model, re-deserialize into the custom type.
                BasicDigitalTwin twin = mapper.readValue(jsonResponse, BasicDigitalTwin.class);
                if (twin.getMetadata().getModelId().equals(modelId)) {
                    CustomDigitalTwin customDigitalTwin = mapper.readValue(jsonResponse, CustomDigitalTwin.class);
                    System.out.println(
                        String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                            dtId_String,
                            customDigitalTwin.getId(), customDigitalTwin.getEtag(), customDigitalTwin.getMetadata().getModelId(), customDigitalTwin.getAverageTemperature(), customDigitalTwin.getTemperatureUnit()));
                } else {
                    System.out.println(
                        String.format("%s: Deserialized BasicDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tCustomProperties=%s \n",
                            dtId_String,
                            twin.getId(), twin.getTwinETag(), twin.getMetadata().getModelId(), Arrays.toString(twin.getCustomProperties().entrySet().toArray())));
                }
            } catch (JsonProcessingException e) {
                System.err.println("Reading response into DigitalTwin failed: ");
                e.printStackTrace();
            }
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_String + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Source twin #2: created and returned as the strongly typed CustomDigitalTwin.
    String dtId_Generic = "dt_Generic_" + random.nextInt();
    CustomDigitalTwin customDigitalTwin = new CustomDigitalTwin()
        .setId(dtId_Generic)
        .setMetadata((CustomDigitalTwinMetadata) new CustomDigitalTwinMetadata().setModelId(modelId))
        .setAverageTemperature(random.nextInt(50))
        .setTemperatureUnit("Celsius");

    Mono<DigitalTwinsResponse<CustomDigitalTwin>> sourceTwinWithResponseGeneric = client.createDigitalTwinWithResponse(dtId_Generic, customDigitalTwin, CustomDigitalTwin.class);
    sourceTwinWithResponseGeneric.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_Generic, result.getStatusCode(), result.getHeaders().get("etag")));
            CustomDigitalTwin twin = result.getValue();
            System.out.println(String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                dtId_Generic,
                twin.getId(), twin.getEtag(), twin.getMetadata().getModelId(), twin.getAverageTemperature(), twin.getTemperatureUnit()));
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_Generic + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Wait (up to 20s) for both async creations to signal completion before continuing.
    boolean created = createTwinsSemaphore.tryAcquire(2, 20, TimeUnit.SECONDS);
    System.out.println("Source twins created: " + created);

    // Target twins for the relationships, created synchronously via block().
    String targetTwin1_Id = "targetTwin_1_" + random.nextInt();
    String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
    String targetTwin2_Id = "targetTwin_2_" + random.nextInt();
    String targetTwin_2 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 50, \"TargetTemperature\": 50, \"TargetHumidity\": 50}";
    String targetTwin3_Id = "targetTwin_3_" + random.nextInt();
    String targetTwin_3 = "{\"$metadata\": {\"$model\": \"dtmi:samples:Floor;1\"}, \"AverageTemperature\": 100}";

    client.createDigitalTwinWithResponse(targetTwin1_Id, targetTwin_1).block();
    client.createDigitalTwinWithResponse(targetTwin2_Id, targetTwin_2).block();
    client.createDigitalTwinWithResponse(targetTwin3_Id, targetTwin_3).block();

    // Relationships from the string-based source twin to each target twin.
    String r_id_1 = "r_id_1_" + random.nextInt();
    String r_id_2 = "r_id_2_" + random.nextInt();
    String r_id_3 = "r_id_3_" + random.nextInt();
    String relationship1 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin1_Id + "\"}";
    String relationship2 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin2_Id + "\"}";
    String relationship3 = "{\"$relationshipName\": \"has\", \"$targetId\": \"" + targetTwin3_Id + "\"}";

    client.createRelationshipWithResponse(dtId_String, r_id_1, relationship1).block();
    client.createRelationshipWithResponse(dtId_String, r_id_2, relationship2).block();
    client.createRelationshipWithResponse(dtId_String, r_id_3, relationship3).block();

    // Patch relationship 3 with a JSON-Patch "add" operation.
    UpdateOperationUtility utility = new UpdateOperationUtility().appendAddOperation("/isAccessRestricted", false);
    client.updateRelationship(dtId_String, r_id_3, utility.getUpdateOperations()).block();

    // Read relationship 2 back to capture its etag for the conditional delete below.
    String createdRelationship2 = client.getRelationship(dtId_String, r_id_2).block();
    JsonNode jsonNode = mapper.readTree(createdRelationship2);
    String etag = jsonNode.path("$etag").textValue();

    client.deleteRelationship(dtId_String, r_id_1).block();
    // Conditional delete: only succeeds while the relationship still carries the captured etag.
    client.deleteRelationshipWithResponse(dtId_String, r_id_2, new RequestOptions().setIfMatch(etag)).block();
}
String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
/**
 * Entry point for the async Digital Twins sample.
 * <p>
 * Flow: create a source twin from a raw JSON string and another from a strongly typed
 * {@code CustomDigitalTwin}, wait for both creations, create three target twins and three
 * relationships from the string-based source twin, patch one relationship, read another back
 * to capture its etag, then delete two relationships (one with an If-Match precondition).
 *
 * @param args Unused command line arguments.
 * @throws InterruptedException If the wait on the twin-creation semaphore is interrupted.
 * @throws JsonProcessingException If twin/relationship JSON (de)serialization fails.
 */
public static void main(String[] args) throws InterruptedException, JsonProcessingException {
    // Service connection settings are read from environment variables.
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String modelId = "dtmi:samples:Building;1";

    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsAsyncClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .httpLogOptions(
            new HttpLogOptions()
                .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Released once per completed subscription below; main() blocks on it before proceeding.
    final Semaphore createTwinsSemaphore = new Semaphore(0);

    // Source twin #1: built as a BasicDigitalTwin, then serialized to a raw JSON string.
    DigitalTwinMetadata metadata = new DigitalTwinMetadata().setModelId(modelId);
    String dtId_String = "dt_String_" + random.nextInt();
    BasicDigitalTwin basicDigitalTwin = new BasicDigitalTwin()
        .setId(dtId_String)
        .setMetadata(metadata)
        .setCustomProperties("AverageTemperature", random.nextInt(50))
        .setCustomProperties("TemperatureUnit", "Celsius");
    String dt_String = mapper.writeValueAsString(basicDigitalTwin);

    Mono<DigitalTwinsResponse<String>> sourceTwinWithResponseString = client.createDigitalTwinWithResponse(dtId_String, dt_String);
    sourceTwinWithResponseString.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_String, result.getStatusCode(), result.getDeserializedHeaders().getETag()));
            try {
                String jsonResponse = result.getValue();
                // Deserialize generically first; when the model id matches the sample's
                // Building model, re-deserialize into the custom type.
                BasicDigitalTwin twin = mapper.readValue(jsonResponse, BasicDigitalTwin.class);
                if (twin.getMetadata().getModelId().equals(modelId)) {
                    CustomDigitalTwin customDigitalTwin = mapper.readValue(jsonResponse, CustomDigitalTwin.class);
                    System.out.println(
                        String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                            dtId_String,
                            customDigitalTwin.getId(), customDigitalTwin.getEtag(), customDigitalTwin.getMetadata().getModelId(), customDigitalTwin.getAverageTemperature(), customDigitalTwin.getTemperatureUnit()));
                } else {
                    System.out.println(
                        String.format("%s: Deserialized BasicDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tCustomProperties=%s \n",
                            dtId_String,
                            twin.getId(), twin.getTwinETag(), twin.getMetadata().getModelId(), Arrays.toString(twin.getCustomProperties().entrySet().toArray())));
                }
            } catch (JsonProcessingException e) {
                System.err.println("Reading response into DigitalTwin failed: ");
                e.printStackTrace();
            }
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_String + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Source twin #2: created and returned as the strongly typed CustomDigitalTwin.
    String dtId_Generic = "dt_Generic_" + random.nextInt();
    CustomDigitalTwin customDigitalTwin = new CustomDigitalTwin()
        .setId(dtId_Generic)
        .setMetadata((CustomDigitalTwinMetadata) new CustomDigitalTwinMetadata().setModelId(modelId))
        .setAverageTemperature(random.nextInt(50))
        .setTemperatureUnit("Celsius");

    Mono<DigitalTwinsResponse<CustomDigitalTwin>> sourceTwinWithResponseGeneric = client.createDigitalTwinWithResponse(dtId_Generic, customDigitalTwin, CustomDigitalTwin.class);
    sourceTwinWithResponseGeneric.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_Generic, result.getStatusCode(), result.getHeaders().get("etag")));
            CustomDigitalTwin twin = result.getValue();
            System.out.println(String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                dtId_Generic,
                twin.getId(), twin.getEtag(), twin.getMetadata().getModelId(), twin.getAverageTemperature(), twin.getTemperatureUnit()));
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_Generic + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Wait (up to 20s) for both async creations to signal completion before continuing.
    boolean created = createTwinsSemaphore.tryAcquire(2, 20, TimeUnit.SECONDS);
    System.out.println("Source twins created: " + created);

    // Target twins for the relationships, created synchronously via block().
    String targetTwin1_Id = "targetTwin_1_" + random.nextInt();
    String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
    String targetTwin2_Id = "targetTwin_2_" + random.nextInt();
    String targetTwin_2 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 50, \"TargetTemperature\": 50, \"TargetHumidity\": 50}";
    String targetTwin3_Id = "targetTwin_3_" + random.nextInt();
    String targetTwin_3 = "{\"$metadata\": {\"$model\": \"dtmi:samples:Floor;1\"}, \"AverageTemperature\": 100}";

    client.createDigitalTwinWithResponse(targetTwin1_Id, targetTwin_1).block();
    client.createDigitalTwinWithResponse(targetTwin2_Id, targetTwin_2).block();
    client.createDigitalTwinWithResponse(targetTwin3_Id, targetTwin_3).block();

    // Relationships from the string-based source twin to each target twin.
    String r_id_1 = "r_id_1_" + random.nextInt();
    String r_id_2 = "r_id_2_" + random.nextInt();
    String r_id_3 = "r_id_3_" + random.nextInt();
    String relationship1 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin1_Id + "\"}";
    String relationship2 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin2_Id + "\"}";
    String relationship3 = "{\"$relationshipName\": \"has\", \"$targetId\": \"" + targetTwin3_Id + "\"}";

    client.createRelationshipWithResponse(dtId_String, r_id_1, relationship1).block();
    client.createRelationshipWithResponse(dtId_String, r_id_2, relationship2).block();
    client.createRelationshipWithResponse(dtId_String, r_id_3, relationship3).block();

    // Patch relationship 3 with a JSON-Patch "add" operation.
    UpdateOperationUtility utility = new UpdateOperationUtility().appendAddOperation("/isAccessRestricted", false);
    client.updateRelationship(dtId_String, r_id_3, utility.getUpdateOperations()).block();

    // Read relationship 2 back to capture its etag for the conditional delete below.
    String createdRelationship2 = client.getRelationship(dtId_String, r_id_2).block();
    JsonNode jsonNode = mapper.readTree(createdRelationship2);
    String etag = jsonNode.path("$etag").textValue();

    client.deleteRelationship(dtId_String, r_id_1).block();
    // Conditional delete: only succeeds while the relationship still carries the captured etag.
    client.deleteRelationshipWithResponse(dtId_String, r_id_2, new RequestOptions().setIfMatch(etag)).block();
}
// Holder for the sample's shared static state.
class AsyncSample {
    // Jackson mapper used to (de)serialize twin and relationship JSON payloads.
    private static final ObjectMapper mapper = new ObjectMapper();
    // Randomness source used to generate unique twin and relationship Ids per run.
    private static final Random random = new Random();
}
// Holder for the sample's shared static state.
class AsyncSample {
    // Jackson mapper used to (de)serialize twin and relationship JSON payloads.
    private static final ObjectMapper mapper = new ObjectMapper();
    // Randomness source used to generate unique twin and relationship Ids per run.
    private static final Random random = new Random();
}
Yes, that's correct.
/**
 * Entry point for the async Digital Twins sample.
 * <p>
 * Flow: create a source twin from a raw JSON string and another from a strongly typed
 * {@code CustomDigitalTwin}, wait for both creations, create three target twins and three
 * relationships from the string-based source twin, patch one relationship, read another back
 * to capture its etag, then delete two relationships (one with an If-Match precondition).
 *
 * @param args Unused command line arguments.
 * @throws InterruptedException If the wait on the twin-creation semaphore is interrupted.
 * @throws JsonProcessingException If twin/relationship JSON (de)serialization fails.
 */
public static void main(String[] args) throws InterruptedException, JsonProcessingException {
    // Service connection settings are read from environment variables.
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String modelId = "dtmi:samples:Building;1";

    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsAsyncClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .httpLogOptions(
            new HttpLogOptions()
                .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Released once per completed subscription below; main() blocks on it before proceeding.
    final Semaphore createTwinsSemaphore = new Semaphore(0);

    // Source twin #1: built as a BasicDigitalTwin, then serialized to a raw JSON string.
    DigitalTwinMetadata metadata = new DigitalTwinMetadata().setModelId(modelId);
    String dtId_String = "dt_String_" + random.nextInt();
    BasicDigitalTwin basicDigitalTwin = new BasicDigitalTwin()
        .setId(dtId_String)
        .setMetadata(metadata)
        .setCustomProperties("AverageTemperature", random.nextInt(50))
        .setCustomProperties("TemperatureUnit", "Celsius");
    String dt_String = mapper.writeValueAsString(basicDigitalTwin);

    Mono<DigitalTwinsResponse<String>> sourceTwinWithResponseString = client.createDigitalTwinWithResponse(dtId_String, dt_String);
    sourceTwinWithResponseString.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_String, result.getStatusCode(), result.getDeserializedHeaders().getETag()));
            try {
                String jsonResponse = result.getValue();
                // Deserialize generically first; when the model id matches the sample's
                // Building model, re-deserialize into the custom type.
                BasicDigitalTwin twin = mapper.readValue(jsonResponse, BasicDigitalTwin.class);
                if (twin.getMetadata().getModelId().equals(modelId)) {
                    CustomDigitalTwin customDigitalTwin = mapper.readValue(jsonResponse, CustomDigitalTwin.class);
                    System.out.println(
                        String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                            dtId_String,
                            customDigitalTwin.getId(), customDigitalTwin.getEtag(), customDigitalTwin.getMetadata().getModelId(), customDigitalTwin.getAverageTemperature(), customDigitalTwin.getTemperatureUnit()));
                } else {
                    System.out.println(
                        String.format("%s: Deserialized BasicDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tCustomProperties=%s \n",
                            dtId_String,
                            twin.getId(), twin.getTwinETag(), twin.getMetadata().getModelId(), Arrays.toString(twin.getCustomProperties().entrySet().toArray())));
                }
            } catch (JsonProcessingException e) {
                System.err.println("Reading response into DigitalTwin failed: ");
                e.printStackTrace();
            }
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_String + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Source twin #2: created and returned as the strongly typed CustomDigitalTwin.
    String dtId_Generic = "dt_Generic_" + random.nextInt();
    CustomDigitalTwin customDigitalTwin = new CustomDigitalTwin()
        .setId(dtId_Generic)
        .setMetadata((CustomDigitalTwinMetadata) new CustomDigitalTwinMetadata().setModelId(modelId))
        .setAverageTemperature(random.nextInt(50))
        .setTemperatureUnit("Celsius");

    Mono<DigitalTwinsResponse<CustomDigitalTwin>> sourceTwinWithResponseGeneric = client.createDigitalTwinWithResponse(dtId_Generic, customDigitalTwin, CustomDigitalTwin.class);
    sourceTwinWithResponseGeneric.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_Generic, result.getStatusCode(), result.getHeaders().get("etag")));
            CustomDigitalTwin twin = result.getValue();
            System.out.println(String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                dtId_Generic,
                twin.getId(), twin.getEtag(), twin.getMetadata().getModelId(), twin.getAverageTemperature(), twin.getTemperatureUnit()));
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_Generic + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Wait (up to 20s) for both async creations to signal completion before continuing.
    boolean created = createTwinsSemaphore.tryAcquire(2, 20, TimeUnit.SECONDS);
    System.out.println("Source twins created: " + created);

    // Target twins for the relationships, created synchronously via block().
    String targetTwin1_Id = "targetTwin_1_" + random.nextInt();
    String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
    String targetTwin2_Id = "targetTwin_2_" + random.nextInt();
    String targetTwin_2 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 50, \"TargetTemperature\": 50, \"TargetHumidity\": 50}";
    String targetTwin3_Id = "targetTwin_3_" + random.nextInt();
    String targetTwin_3 = "{\"$metadata\": {\"$model\": \"dtmi:samples:Floor;1\"}, \"AverageTemperature\": 100}";

    client.createDigitalTwinWithResponse(targetTwin1_Id, targetTwin_1).block();
    client.createDigitalTwinWithResponse(targetTwin2_Id, targetTwin_2).block();
    client.createDigitalTwinWithResponse(targetTwin3_Id, targetTwin_3).block();

    // Relationships from the string-based source twin to each target twin.
    String r_id_1 = "r_id_1_" + random.nextInt();
    String r_id_2 = "r_id_2_" + random.nextInt();
    String r_id_3 = "r_id_3_" + random.nextInt();
    String relationship1 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin1_Id + "\"}";
    String relationship2 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin2_Id + "\"}";
    String relationship3 = "{\"$relationshipName\": \"has\", \"$targetId\": \"" + targetTwin3_Id + "\"}";

    client.createRelationshipWithResponse(dtId_String, r_id_1, relationship1).block();
    client.createRelationshipWithResponse(dtId_String, r_id_2, relationship2).block();
    client.createRelationshipWithResponse(dtId_String, r_id_3, relationship3).block();

    // Patch relationship 3 with a JSON-Patch "add" operation.
    UpdateOperationUtility utility = new UpdateOperationUtility().appendAddOperation("/isAccessRestricted", false);
    client.updateRelationship(dtId_String, r_id_3, utility.getUpdateOperations()).block();

    // Read relationship 2 back to capture its etag for the conditional delete below.
    String createdRelationship2 = client.getRelationship(dtId_String, r_id_2).block();
    JsonNode jsonNode = mapper.readTree(createdRelationship2);
    String etag = jsonNode.path("$etag").textValue();

    client.deleteRelationship(dtId_String, r_id_1).block();
    // Conditional delete: only succeeds while the relationship still carries the captured etag.
    client.deleteRelationshipWithResponse(dtId_String, r_id_2, new RequestOptions().setIfMatch(etag)).block();
}
String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
/**
 * Entry point for the async Digital Twins sample.
 * <p>
 * Flow: create a source twin from a raw JSON string and another from a strongly typed
 * {@code CustomDigitalTwin}, wait for both creations, create three target twins and three
 * relationships from the string-based source twin, patch one relationship, read another back
 * to capture its etag, then delete two relationships (one with an If-Match precondition).
 *
 * @param args Unused command line arguments.
 * @throws InterruptedException If the wait on the twin-creation semaphore is interrupted.
 * @throws JsonProcessingException If twin/relationship JSON (de)serialization fails.
 */
public static void main(String[] args) throws InterruptedException, JsonProcessingException {
    // Service connection settings are read from environment variables.
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String modelId = "dtmi:samples:Building;1";

    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsAsyncClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .httpLogOptions(
            new HttpLogOptions()
                .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Released once per completed subscription below; main() blocks on it before proceeding.
    final Semaphore createTwinsSemaphore = new Semaphore(0);

    // Source twin #1: built as a BasicDigitalTwin, then serialized to a raw JSON string.
    DigitalTwinMetadata metadata = new DigitalTwinMetadata().setModelId(modelId);
    String dtId_String = "dt_String_" + random.nextInt();
    BasicDigitalTwin basicDigitalTwin = new BasicDigitalTwin()
        .setId(dtId_String)
        .setMetadata(metadata)
        .setCustomProperties("AverageTemperature", random.nextInt(50))
        .setCustomProperties("TemperatureUnit", "Celsius");
    String dt_String = mapper.writeValueAsString(basicDigitalTwin);

    Mono<DigitalTwinsResponse<String>> sourceTwinWithResponseString = client.createDigitalTwinWithResponse(dtId_String, dt_String);
    sourceTwinWithResponseString.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_String, result.getStatusCode(), result.getDeserializedHeaders().getETag()));
            try {
                String jsonResponse = result.getValue();
                // Deserialize generically first; when the model id matches the sample's
                // Building model, re-deserialize into the custom type.
                BasicDigitalTwin twin = mapper.readValue(jsonResponse, BasicDigitalTwin.class);
                if (twin.getMetadata().getModelId().equals(modelId)) {
                    CustomDigitalTwin customDigitalTwin = mapper.readValue(jsonResponse, CustomDigitalTwin.class);
                    System.out.println(
                        String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                            dtId_String,
                            customDigitalTwin.getId(), customDigitalTwin.getEtag(), customDigitalTwin.getMetadata().getModelId(), customDigitalTwin.getAverageTemperature(), customDigitalTwin.getTemperatureUnit()));
                } else {
                    System.out.println(
                        String.format("%s: Deserialized BasicDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tCustomProperties=%s \n",
                            dtId_String,
                            twin.getId(), twin.getTwinETag(), twin.getMetadata().getModelId(), Arrays.toString(twin.getCustomProperties().entrySet().toArray())));
                }
            } catch (JsonProcessingException e) {
                System.err.println("Reading response into DigitalTwin failed: ");
                e.printStackTrace();
            }
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_String + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Source twin #2: created and returned as the strongly typed CustomDigitalTwin.
    String dtId_Generic = "dt_Generic_" + random.nextInt();
    CustomDigitalTwin customDigitalTwin = new CustomDigitalTwin()
        .setId(dtId_Generic)
        .setMetadata((CustomDigitalTwinMetadata) new CustomDigitalTwinMetadata().setModelId(modelId))
        .setAverageTemperature(random.nextInt(50))
        .setTemperatureUnit("Celsius");

    Mono<DigitalTwinsResponse<CustomDigitalTwin>> sourceTwinWithResponseGeneric = client.createDigitalTwinWithResponse(dtId_Generic, customDigitalTwin, CustomDigitalTwin.class);
    sourceTwinWithResponseGeneric.subscribe(
        result -> {
            System.out.println(String.format("%s: Created twin, Status = %d, Etag = %s", dtId_Generic, result.getStatusCode(), result.getHeaders().get("etag")));
            CustomDigitalTwin twin = result.getValue();
            System.out.println(String.format("%s: Deserialized CustomDigitalTwin, \n\tId=%s, \n\tEtag=%s, \n\tModelId=%s, \n\tAverageTemperature=%d, \n\tTemperatureUnit=%s \n",
                dtId_Generic,
                twin.getId(), twin.getEtag(), twin.getMetadata().getModelId(), twin.getAverageTemperature(), twin.getTemperatureUnit()));
        },
        throwable -> System.err.println("Failed to create source twin on digital twin with Id " + dtId_Generic + " due to error message " + throwable.getMessage()),
        createTwinsSemaphore::release);

    // Wait (up to 20s) for both async creations to signal completion before continuing.
    boolean created = createTwinsSemaphore.tryAcquire(2, 20, TimeUnit.SECONDS);
    System.out.println("Source twins created: " + created);

    // Target twins for the relationships, created synchronously via block().
    String targetTwin1_Id = "targetTwin_1_" + random.nextInt();
    String targetTwin_1 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 10, \"TargetTemperature\": 10, \"TargetHumidity\": 10}";
    String targetTwin2_Id = "targetTwin_2_" + random.nextInt();
    String targetTwin_2 = "{\"$metadata\": {\"$model\": \"dtmi:samples:HVAC;1\"}, \"Efficiency\": 50, \"TargetTemperature\": 50, \"TargetHumidity\": 50}";
    String targetTwin3_Id = "targetTwin_3_" + random.nextInt();
    String targetTwin_3 = "{\"$metadata\": {\"$model\": \"dtmi:samples:Floor;1\"}, \"AverageTemperature\": 100}";

    client.createDigitalTwinWithResponse(targetTwin1_Id, targetTwin_1).block();
    client.createDigitalTwinWithResponse(targetTwin2_Id, targetTwin_2).block();
    client.createDigitalTwinWithResponse(targetTwin3_Id, targetTwin_3).block();

    // Relationships from the string-based source twin to each target twin.
    String r_id_1 = "r_id_1_" + random.nextInt();
    String r_id_2 = "r_id_2_" + random.nextInt();
    String r_id_3 = "r_id_3_" + random.nextInt();
    String relationship1 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin1_Id + "\"}";
    String relationship2 = "{\"$relationshipName\": \"isEquippedWith\", \"$targetId\": \"" + targetTwin2_Id + "\"}";
    String relationship3 = "{\"$relationshipName\": \"has\", \"$targetId\": \"" + targetTwin3_Id + "\"}";

    client.createRelationshipWithResponse(dtId_String, r_id_1, relationship1).block();
    client.createRelationshipWithResponse(dtId_String, r_id_2, relationship2).block();
    client.createRelationshipWithResponse(dtId_String, r_id_3, relationship3).block();

    // Patch relationship 3 with a JSON-Patch "add" operation.
    UpdateOperationUtility utility = new UpdateOperationUtility().appendAddOperation("/isAccessRestricted", false);
    client.updateRelationship(dtId_String, r_id_3, utility.getUpdateOperations()).block();

    // Read relationship 2 back to capture its etag for the conditional delete below.
    String createdRelationship2 = client.getRelationship(dtId_String, r_id_2).block();
    JsonNode jsonNode = mapper.readTree(createdRelationship2);
    String etag = jsonNode.path("$etag").textValue();

    client.deleteRelationship(dtId_String, r_id_1).block();
    // Conditional delete: only succeeds while the relationship still carries the captured etag.
    client.deleteRelationshipWithResponse(dtId_String, r_id_2, new RequestOptions().setIfMatch(etag)).block();
}
// Holder for the sample's shared static state.
class AsyncSample {
    // Jackson mapper used to (de)serialize twin and relationship JSON payloads.
    private static final ObjectMapper mapper = new ObjectMapper();
    // Randomness source used to generate unique twin and relationship Ids per run.
    private static final Random random = new Random();
}
// Holder for the sample's shared static state.
class AsyncSample {
    // Jackson mapper used to (de)serialize twin and relationship JSON payloads.
    private static final ObjectMapper mapper = new ObjectMapper();
    // Randomness source used to generate unique twin and relationship Ids per run.
    private static final Random random = new Random();
}
Why not define `listModelOptions.getDependenciesFor` as a List to begin with? If the protocol layer is restrictive, is there any advantage in the public API being flexible? Is there a risk of running into conversion errors by simply casting it to a List — would a for-each loop be better: https://www.baeldung.com/java-iterable-to-collection?
/**
 * Retrieves a single page of models from the service.
 *
 * @param listModelOptions The options to apply to this list operation.
 * @param context The context to pass through the HTTP pipeline.
 * @return A Mono that emits one page of {@link ModelData}.
 */
Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context){
    // Copy the dependenciesFor values into a List instead of blindly casting; the previous
    // (List<String>) cast would throw ClassCastException for any Iterable implementation
    // that does not happen to be a List. Assumes getDependenciesFor() yields Strings —
    // TODO confirm against ListModelOptions.
    List<String> dependenciesFor = null;
    if (listModelOptions.getDependenciesFor() != null) {
        dependenciesFor = new ArrayList<>();
        for (String dependency : listModelOptions.getDependenciesFor()) {
            dependenciesFor.add(dependency);
        }
    }
    return protocolLayer.getDigitalTwinModels().listSinglePageAsync(
        dependenciesFor,
        listModelOptions.getIncludeModelDefinition(),
        new DigitalTwinModelsListOptions().setMaxItemCount(listModelOptions.getMaxItemCount()),
        context);
}
(List<String>) listModelOptions.getDependenciesFor(),
new DigitalTwinModelsListOptions().setMaxItemCount(listModelOptions.getMaxItemCount()), context); } Mono<PagedResponse<ModelData>> listModelsNextSinglePageAsync(String nextLink, Context context){ return protocolLayer.getDigitalTwinModels().listNextSinglePageAsync(nextLink, context); }
class DigitalTwinsAsyncClient { private static final ClientLogger logger = new ClientLogger(DigitalTwinsAsyncClient.class); private static final ObjectMapper mapper = new ObjectMapper(); private final DigitalTwinsServiceVersion serviceVersion; private final AzureDigitalTwinsAPIImpl protocolLayer; private static final Boolean includeModelDefinition = true; DigitalTwinsAsyncClient(HttpPipeline pipeline, DigitalTwinsServiceVersion serviceVersion, String host) { final SimpleModule stringModule = new SimpleModule("String Serializer"); stringModule.addSerializer(new DigitalTwinsStringSerializer(String.class, mapper)); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(stringModule); this.protocolLayer = new AzureDigitalTwinsAPIImplBuilder() .host(host) .pipeline(pipeline) .serializerAdapter(jacksonAdapter) .buildClient(); this.serviceVersion = serviceVersion; } /** * Gets the Azure Digital Twins service API version that this client is configured to use for all service requests. * Unless configured while building this client through {@link DigitalTwinsClientBuilder * this value will be equal to the latest service API version supported by this client. * * @return The Azure Digital Twins service API version. */ public DigitalTwinsServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Gets the {@link HttpPipeline} that this client is configured to use for all service requests. This pipeline can * be customized while building this client through {@link DigitalTwinsClientBuilder * * @return The {@link HttpPipeline} that this client uses for all service requests. 
*/ public HttpPipeline getHttpPipeline() { return this.protocolLayer.getHttpPipeline(); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, context)); } @ServiceMethod(returns = ReturnType.SINGLE) public <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, klazz, context)); } Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse, twinHeaders)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { T genericResponse = mapper.convertValue(response.getValue(), klazz); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), genericResponse, twinHeaders)); }); } /** * Creates a relationship on a digital twin. 
* * @param digitalTwinId The Id of the source digital twin. * @param relationshipId The Id of the relationship to be created. * @param relationship The application/json relationship to be created. * @return A REST response containing the application/json relationship created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship) { return withContext(context -> createRelationshipWithResponse(digitalTwinId, relationshipId, relationship, context)); } Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship, Context context) { return protocolLayer .getDigitalTwins() .addRelationshipWithResponseAsync(digitalTwinId, relationshipId, relationship, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } /** * Gets all the relationships on a digital twin filtered by the relationship name, by iterating through a collection. * * @param digitalTwinId The Id of the source digital twin. * @param relationshipName The name of a relationship to filter to. * @return A {@link PagedFlux} of application/json relationships belonging to the specified digital twin and the http response. 
*/
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName) {
        return new PagedFlux<>(
            () -> withContext(context -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context)),
            nextLink -> withContext(context -> listRelationshipsNextSinglePageAsync(nextLink, context)));
    }

    PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName, Context context) {
        return new PagedFlux<>(
            () -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context),
            nextLink -> listRelationshipsNextSinglePageAsync(nextLink, context));
    }

    Mono<PagedResponse<String>> listRelationshipsSinglePageAsync(String digitalTwinId, String relationshipName, Context context) {
        return protocolLayer.getDigitalTwins().listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context)
            .map(objectPagedResponse -> {
                // Serialize each relationship back to its raw JSON string form. Entries that fail to
                // serialize are logged and dropped rather than failing the whole page.
                List<String> stringList = objectPagedResponse.getValue().stream()
                    .map(object -> {
                        try {
                            return mapper.writeValueAsString(object);
                        } catch (JsonProcessingException e) {
                            // ClientLogger uses SLF4J-style {} placeholders, not printf-style %s.
                            logger.error("Could not parse the returned relationship [{}]: {}", object, e);
                            return null;
                        }
                    })
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
                return new PagedResponseBase<>(
                    objectPagedResponse.getRequest(),
                    objectPagedResponse.getStatusCode(),
                    objectPagedResponse.getHeaders(),
                    stringList,
                    objectPagedResponse.getContinuationToken(),
                    ((PagedResponseBase) objectPagedResponse).getDeserializedHeaders());
            });
    }

    Mono<PagedResponse<String>> listRelationshipsNextSinglePageAsync(String nextLink, Context context) {
        return protocolLayer.getDigitalTwins().listRelationshipsNextSinglePageAsync(nextLink, context)
            .map(objectPagedResponse -> {
                List<String> stringList = objectPagedResponse.getValue().stream()
                    .map(object -> {
                        try {
                            return mapper.writeValueAsString(object);
                        } catch (JsonProcessingException e) {
                            // ClientLogger uses SLF4J-style {} placeholders, not printf-style %s.
                            logger.error("Could not parse the returned relationship [{}]: {}", object, e);
                            return null;
                        }
                    })
                    .filter(Objects::nonNull)
                    .collect(Collectors.toList());
                return new PagedResponseBase<>(
                    objectPagedResponse.getRequest(),
                    objectPagedResponse.getStatusCode(),
                    objectPagedResponse.getHeaders(),
                    stringList,
                    objectPagedResponse.getContinuationToken(),
                    ((PagedResponseBase) objectPagedResponse).getDeserializedHeaders());
            });
    }

    /**
     * Creates one or many models.
     *
     * @param models The list of models to create. Each string corresponds to exactly one model.
     * @return A {@link PagedFlux} of created models.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<ModelData> createModels(List<String> models) {
        // Model creation is a single call; there is never a next page, hence the empty continuation.
        return new PagedFlux<>(
            () -> withContext(context -> createModelsSinglePageAsync(models, context)),
            nextLink -> withContext(context -> Mono.empty()));
    }

    PagedFlux<ModelData> createModels(List<String> models, Context context) {
        return new PagedFlux<>(
            () -> createModelsSinglePageAsync(models, context),
            nextLink -> Mono.empty());
    }

    Mono<PagedResponse<ModelData>> createModelsSinglePageAsync(List<String> models, Context context) {
        // Parse each model string eagerly so malformed input fails before any network call.
        List<Object> modelsPayload = new ArrayList<>();
        for (String model : models) {
            try {
                modelsPayload.add(mapper.readValue(model, Object.class));
            } catch (JsonProcessingException e) {
                // ClientLogger uses SLF4J-style {} placeholders, not printf-style %s.
                logger.error("Could not parse the model payload [{}]: {}", model, e);
                return Mono.error(e);
            }
        }
        return protocolLayer.getDigitalTwinModels().addWithResponseAsync(modelsPayload, context)
            .map(listResponse -> new PagedResponseBase<>(
                listResponse.getRequest(),
                listResponse.getStatusCode(),
                listResponse.getHeaders(),
                listResponse.getValue(),
                null,
                ((ResponseBase) listResponse).getDeserializedHeaders()));
    }

    /**
     * Gets a model, including the model metadata and the model definition.
     *
     * @param modelId The Id of the model.
     * @return The application/json model.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<ModelData> getModel(String modelId) {
        return withContext(context -> getModel(modelId, context));
    }

    Mono<ModelData> getModel(String modelId, Context context) {
        return protocolLayer.getDigitalTwinModels().getByIdWithResponseAsync(modelId, includeModelDefinition, context)
            .flatMap(modelDataResponse -> Mono.just(mapper.convertValue(modelDataResponse.getValue(), ModelData.class)));
    }

    /**
     * Gets a model, including the model metadata and the model definition.
     *
     * @param modelId The Id of the model.
     * @return The application/json model and the http response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<ModelData>> getModelWithResponse(String modelId) {
        return withContext(context -> getModelWithResponse(modelId, context));
    }

    Mono<Response<ModelData>> getModelWithResponse(String modelId, Context context) {
        return protocolLayer
            .getDigitalTwinModels()
            .getByIdWithResponseAsync(modelId, includeModelDefinition, context);
    }

    /**
     * Gets the list of models by iterating through a collection.
     *
     * @param listModelOptions The options for the list operation.
     * @return A {@link PagedFlux} of ModelData.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<ModelData> listModels(ListModelOptions listModelOptions) {
        return new PagedFlux<>(
            () -> withContext(context -> listModelsSinglePageAsync(listModelOptions, context)),
            nextLink -> withContext(context -> listModelsNextSinglePageAsync(nextLink, context)));
    }

    PagedFlux<ModelData> listModels(ListModelOptions listModelOptions, Context context) {
        return new PagedFlux<>(
            () -> listModelsSinglePageAsync(listModelOptions, context),
            nextLink -> listModelsNextSinglePageAsync(nextLink, context));
    }

    Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context) {
        // NOTE(review): the original text was truncated mid-argument-list here; the call is
        // reconstructed to pass the page-size hint through — confirm against the protocol
        // layer's listSinglePageAsync signature. The (List<String>) cast was dropped because
        // getDependenciesFor() is declared to return List<String>.
        return protocolLayer.getDigitalTwinModels().listSinglePageAsync(
            listModelOptions.getDependenciesFor(),
            listModelOptions.getIncludeModelDefinition(),
            new DigitalTwinModelsListOptions().setMaxItemCount(listModelOptions.getMaxItemCount()),
            context);
    }

    // Retrieves the next page of models by following the nextLink from the previous page.
    // NOTE(review): this method is referenced by the listModels overloads above but was lost in
    // the truncation; restored here — verify it is not declared elsewhere in this class.
    Mono<PagedResponse<ModelData>> listModelsNextSinglePageAsync(String nextLink, Context context) {
        return protocolLayer.getDigitalTwinModels().listNextSinglePageAsync(nextLink, context);
    }

    /**
     * Gets the list of models, with default options, by iterating through a collection.
     *
     * @return A {@link PagedFlux} of ModelData.
     */
    @ServiceMethod(returns = ReturnType.COLLECTION)
    public PagedFlux<ModelData> listModels() {
        return listModels(new ListModelOptions());
    }

    PagedFlux<ModelData> listModels(Context context) {
        return new PagedFlux<>(
            () -> listModelsSinglePageAsync(new ListModelOptions(), context),
            nextLink -> listModelsNextSinglePageAsync(nextLink, context));
    }
}
class DigitalTwinsAsyncClient { private static final ClientLogger logger = new ClientLogger(DigitalTwinsAsyncClient.class); private static final ObjectMapper mapper = new ObjectMapper(); private final DigitalTwinsServiceVersion serviceVersion; private final AzureDigitalTwinsAPIImpl protocolLayer; private static final Boolean includeModelDefinition = true; DigitalTwinsAsyncClient(HttpPipeline pipeline, DigitalTwinsServiceVersion serviceVersion, String host) { final SimpleModule stringModule = new SimpleModule("String Serializer"); stringModule.addSerializer(new DigitalTwinsStringSerializer(String.class, mapper)); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(stringModule); this.protocolLayer = new AzureDigitalTwinsAPIImplBuilder() .host(host) .pipeline(pipeline) .serializerAdapter(jacksonAdapter) .buildClient(); this.serviceVersion = serviceVersion; } /** * Gets the Azure Digital Twins service API version that this client is configured to use for all service requests. * Unless configured while building this client through {@link DigitalTwinsClientBuilder * this value will be equal to the latest service API version supported by this client. * * @return The Azure Digital Twins service API version. */ public DigitalTwinsServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Gets the {@link HttpPipeline} that this client is configured to use for all service requests. This pipeline can * be customized while building this client through {@link DigitalTwinsClientBuilder * * @return The {@link HttpPipeline} that this client uses for all service requests. 
*/ public HttpPipeline getHttpPipeline() { return this.protocolLayer.getHttpPipeline(); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, context)); } @ServiceMethod(returns = ReturnType.SINGLE) public <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, klazz, context)); } Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse, twinHeaders)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { T genericResponse = mapper.convertValue(response.getValue(), klazz); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), genericResponse, twinHeaders)); }); } /** * Creates a relationship on a digital twin. 
* * @param digitalTwinId The Id of the source digital twin. * @param relationshipId The Id of the relationship to be created. * @param relationship The application/json relationship to be created. * @return A REST response containing the application/json relationship created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship) { return withContext(context -> createRelationshipWithResponse(digitalTwinId, relationshipId, relationship, context)); } Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship, Context context) { return protocolLayer .getDigitalTwins() .addRelationshipWithResponseAsync(digitalTwinId, relationshipId, relationship, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } /** * Gets all the relationships on a digital twin filtered by the relationship name, by iterating through a collection. * * @param digitalTwinId The Id of the source digital twin. * @param relationshipName The name of a relationship to filter to. * @return A {@link PagedFlux} of application/json relationships belonging to the specified digital twin and the http response. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName) { return new PagedFlux<>( () -> withContext(context -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context)), nextLink -> withContext(context -> listRelationshipsNextSinglePageAsync(nextLink, context))); } PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName, Context context) { return new PagedFlux<>( () -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context), nextLink -> listRelationshipsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<String>> listRelationshipsSinglePageAsync(String digitalTwinId, String relationshipName, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context) .map( objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; } }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase) objectPagedResponse).getDeserializedHeaders()); } ); } Mono<PagedResponse<String>> listRelationshipsNextSinglePageAsync(String nextLink, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsNextSinglePageAsync(nextLink, context) .map(objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; 
} }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase)objectPagedResponse).getDeserializedHeaders()); }); } /** * Creates one or many models. * @param models The list of models to create. Each string corresponds to exactly one model. * @return A {@link PagedFlux} of created models and the http response. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> createModels(List<String> models) { return new PagedFlux<>( () -> withContext(context -> createModelsSinglePageAsync(models, context)), nextLink -> withContext(context -> Mono.empty())); } PagedFlux<ModelData> createModels(List<String> models, Context context){ return new PagedFlux<>( () -> createModelsSinglePageAsync(models, context), nextLink -> Mono.empty()); } Mono<PagedResponse<ModelData>> createModelsSinglePageAsync(List<String> models, Context context) { List<Object> modelsPayload = new ArrayList<>(); for (String model: models) { try { modelsPayload.add(mapper.readValue(model, Object.class)); } catch (JsonProcessingException e) { logger.error("Could not parse the model payload [%s]: %s", model, e); return Mono.error(e); } } return protocolLayer.getDigitalTwinModels().addWithResponseAsync(modelsPayload, context) .map( listResponse -> new PagedResponseBase<>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), listResponse.getValue(), null, ((ResponseBase)listResponse).getDeserializedHeaders())); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. 
* @return The ModelData */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ModelData> getModel(String modelId) { return withContext(context -> getModelWithResponse(modelId, context)) .flatMap(response -> Mono.just(response.getValue())); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. * @return The ModelData and the http response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<ModelData>> getModelWithResponse(String modelId) { return withContext(context -> getModelWithResponse(modelId, context)); } Mono<Response<ModelData>> getModelWithResponse(String modelId, Context context){ return protocolLayer .getDigitalTwinModels() .getByIdWithResponseAsync(modelId, includeModelDefinition, context); } /** * Gets the list of models by iterating through a collection. * @param listModelOptions The options to follow when listing the models. For example, the page size hint can be specified. * @return A {@link PagedFlux} of ModelData and the http response. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels(ListModelOptions listModelOptions) { return new PagedFlux<>( () -> withContext(context -> listModelsSinglePageAsync(listModelOptions, context)), nextLink -> withContext(context -> listModelsNextSinglePageAsync(nextLink, context))); } /** * Gets the list of models by iterating through a collection. * @return A {@link PagedFlux} of ModelData and the http response. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels() { return listModels(new ListModelOptions()); } PagedFlux<ModelData> listModels(Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(new ListModelOptions(), context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } PagedFlux<ModelData> listModels(ListModelOptions listModelOptions, Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(listModelOptions, context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context){ return protocolLayer.getDigitalTwinModels().listSinglePageAsync( listModelOptions.getDependenciesFor(), listModelOptions.getIncludeModelDefinition(), /** * Deletes a model. * @param modelId The Id for the model. The Id is globally unique and case sensitive. * @return An empty Mono */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteModel(String modelId) { return withContext(context -> deleteModelWithResponse(modelId, context)) .flatMap(response -> Mono.just(response.getValue())); } /** * Deletes a model. * @param modelId The Id for the model. The Id is globally unique and case sensitive. * @return The http response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteModelWithResponse(String modelId) { return withContext(context -> deleteModelWithResponse(modelId, context)); } Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context){ return protocolLayer.getDigitalTwinModels().deleteWithResponseAsync(modelId, context); } }
I think so too. I will change all of these types back to `List`, since that is the input type the protocol layer accepts, and I don't see a point in being more generic than that.
/**
 * Retrieves a single page of models from the service.
 *
 * @param listModelOptions The options for the list operation; the page-size hint is forwarded
 *                         to the protocol layer via {@link DigitalTwinModelsListOptions}.
 * @param context The context to pass through to the protocol layer.
 * @return A Mono that emits the first page of ModelData.
 */
Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context) {
    return protocolLayer.getDigitalTwinModels().listSinglePageAsync(
        // getDependenciesFor() is declared as List<String>, so the (List<String>) cast is unnecessary.
        listModelOptions.getDependenciesFor(),
        listModelOptions.getIncludeModelDefinition(),
        new DigitalTwinModelsListOptions().setMaxItemCount(listModelOptions.getMaxItemCount()),
        context);
}
(List<String>) listModelOptions.getDependenciesFor(),
// Tail of listModelsSinglePageAsync: the DigitalTwinModelsListOptions forwards the caller's
// page-size hint to the protocol layer. The opening of this call sits outside this chunk.
new DigitalTwinModelsListOptions().setMaxItemCount(listModelOptions.getMaxItemCount()), context); } /* Retrieves the next page of models by following the nextLink returned with the previous page. */ Mono<PagedResponse<ModelData>> listModelsNextSinglePageAsync(String nextLink, Context context){ return protocolLayer.getDigitalTwinModels().listNextSinglePageAsync(nextLink, context); }
class DigitalTwinsAsyncClient { private static final ClientLogger logger = new ClientLogger(DigitalTwinsAsyncClient.class); private static final ObjectMapper mapper = new ObjectMapper(); private final DigitalTwinsServiceVersion serviceVersion; private final AzureDigitalTwinsAPIImpl protocolLayer; private static final Boolean includeModelDefinition = true; DigitalTwinsAsyncClient(HttpPipeline pipeline, DigitalTwinsServiceVersion serviceVersion, String host) { final SimpleModule stringModule = new SimpleModule("String Serializer"); stringModule.addSerializer(new DigitalTwinsStringSerializer(String.class, mapper)); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(stringModule); this.protocolLayer = new AzureDigitalTwinsAPIImplBuilder() .host(host) .pipeline(pipeline) .serializerAdapter(jacksonAdapter) .buildClient(); this.serviceVersion = serviceVersion; } /** * Gets the Azure Digital Twins service API version that this client is configured to use for all service requests. * Unless configured while building this client through {@link DigitalTwinsClientBuilder * this value will be equal to the latest service API version supported by this client. * * @return The Azure Digital Twins service API version. */ public DigitalTwinsServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Gets the {@link HttpPipeline} that this client is configured to use for all service requests. This pipeline can * be customized while building this client through {@link DigitalTwinsClientBuilder * * @return The {@link HttpPipeline} that this client uses for all service requests. 
*/ public HttpPipeline getHttpPipeline() { return this.protocolLayer.getHttpPipeline(); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, context)); } @ServiceMethod(returns = ReturnType.SINGLE) public <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, klazz, context)); } Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse, twinHeaders)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { T genericResponse = mapper.convertValue(response.getValue(), klazz); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), genericResponse, twinHeaders)); }); } /** * Creates a relationship on a digital twin. 
* * @param digitalTwinId The Id of the source digital twin. * @param relationshipId The Id of the relationship to be created. * @param relationship The application/json relationship to be created. * @return A REST response containing the application/json relationship created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship) { return withContext(context -> createRelationshipWithResponse(digitalTwinId, relationshipId, relationship, context)); } Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship, Context context) { return protocolLayer .getDigitalTwins() .addRelationshipWithResponseAsync(digitalTwinId, relationshipId, relationship, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } /** * Gets all the relationships on a digital twin filtered by the relationship name, by iterating through a collection. * * @param digitalTwinId The Id of the source digital twin. * @param relationshipName The name of a relationship to filter to. * @return A {@link PagedFlux} of application/json relationships belonging to the specified digital twin and the http response. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName) { return new PagedFlux<>( () -> withContext(context -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context)), nextLink -> withContext(context -> listRelationshipsNextSinglePageAsync(nextLink, context))); } PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName, Context context) { return new PagedFlux<>( () -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context), nextLink -> listRelationshipsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<String>> listRelationshipsSinglePageAsync(String digitalTwinId, String relationshipName, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context) .map( objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; } }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase) objectPagedResponse).getDeserializedHeaders()); } ); } Mono<PagedResponse<String>> listRelationshipsNextSinglePageAsync(String nextLink, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsNextSinglePageAsync(nextLink, context) .map(objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; 
} }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase)objectPagedResponse).getDeserializedHeaders()); }); } /** * Creates one or many models. * @param models The list of models to create. Each string corresponds to exactly one model. * @return A {@link PagedFlux} of created models. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> createModels(List<String> models) { return new PagedFlux<>( () -> withContext(context -> createModelsSinglePageAsync(models, context)), nextLink -> withContext(context -> Mono.empty())); } PagedFlux<ModelData> createModels(List<String> models, Context context){ return new PagedFlux<>( () -> createModelsSinglePageAsync(models, context), nextLink -> Mono.empty()); } Mono<PagedResponse<ModelData>> createModelsSinglePageAsync(List<String> models, Context context) { List<Object> modelsPayload = new ArrayList<>(); for (String model: models) { try { modelsPayload.add(mapper.readValue(model, Object.class)); } catch (JsonProcessingException e) { logger.error("Could not parse the model payload [%s]: %s", model, e); return Mono.error(e); } } return protocolLayer.getDigitalTwinModels().addWithResponseAsync(modelsPayload, context) .map( listResponse -> new PagedResponseBase<>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), listResponse.getValue(), null, ((ResponseBase)listResponse).getDeserializedHeaders())); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. 
* @return The application/json model */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ModelData> getModel(String modelId) { return withContext(context -> getModel(modelId, context)); } Mono<ModelData> getModel(String modelId, Context context){ return protocolLayer.getDigitalTwinModels().getByIdWithResponseAsync(modelId, includeModelDefinition, context) .flatMap(modelDataResponse -> Mono.just(mapper.convertValue(modelDataResponse.getValue(), ModelData.class))); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. * @return The application/json model */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<ModelData>> getModelWithResponse(String modelId) { return withContext(context -> getModelWithResponse(modelId, context)); } Mono<Response<ModelData>> getModelWithResponse(String modelId, Context context){ return protocolLayer .getDigitalTwinModels() .getByIdWithResponseAsync(modelId, includeModelDefinition, context); } /** * Gets the list of models by iterating through a collection. * @param listModelOptions The options for the list operation. * @return A {@link PagedFlux} of ModelData. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels(ListModelOptions listModelOptions) { return new PagedFlux<>( () -> withContext(context -> listModelsSinglePageAsync(listModelOptions, context)), nextLink -> withContext(context -> listModelsNextSinglePageAsync(nextLink, context))); } PagedFlux<ModelData> listModels(ListModelOptions listModelOptions, Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(listModelOptions, context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context){ return protocolLayer.getDigitalTwinModels().listSinglePageAsync( (List<String>) listModelOptions.getDependenciesFor(), listModelOptions.getIncludeModelDefinition(), /** * Gets the list of models by iterating through a collection. * @return A {@link PagedFlux} of ModelData. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels() { return listModels(new ListModelOptions()); } PagedFlux<ModelData> listModels(Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(new ListModelOptions(), context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } }
class DigitalTwinsAsyncClient { private static final ClientLogger logger = new ClientLogger(DigitalTwinsAsyncClient.class); private static final ObjectMapper mapper = new ObjectMapper(); private final DigitalTwinsServiceVersion serviceVersion; private final AzureDigitalTwinsAPIImpl protocolLayer; private static final Boolean includeModelDefinition = true; DigitalTwinsAsyncClient(HttpPipeline pipeline, DigitalTwinsServiceVersion serviceVersion, String host) { final SimpleModule stringModule = new SimpleModule("String Serializer"); stringModule.addSerializer(new DigitalTwinsStringSerializer(String.class, mapper)); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(stringModule); this.protocolLayer = new AzureDigitalTwinsAPIImplBuilder() .host(host) .pipeline(pipeline) .serializerAdapter(jacksonAdapter) .buildClient(); this.serviceVersion = serviceVersion; } /** * Gets the Azure Digital Twins service API version that this client is configured to use for all service requests. * Unless configured while building this client through {@link DigitalTwinsClientBuilder * this value will be equal to the latest service API version supported by this client. * * @return The Azure Digital Twins service API version. */ public DigitalTwinsServiceVersion getServiceVersion() { return this.serviceVersion; } /** * Gets the {@link HttpPipeline} that this client is configured to use for all service requests. This pipeline can * be customized while building this client through {@link DigitalTwinsClientBuilder * * @return The {@link HttpPipeline} that this client uses for all service requests. 
*/ public HttpPipeline getHttpPipeline() { return this.protocolLayer.getHttpPipeline(); } @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, context)); } @ServiceMethod(returns = ReturnType.SINGLE) public <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz) { return withContext(context -> createDigitalTwinWithResponse(digitalTwinId, digitalTwin, klazz, context)); } Mono<DigitalTwinsResponse<String>> createDigitalTwinWithResponse(String digitalTwinId, String digitalTwin, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse, twinHeaders)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } <T> Mono<DigitalTwinsResponse<T>> createDigitalTwinWithResponse(String digitalTwinId, Object digitalTwin, Class<T> klazz, Context context) { return protocolLayer .getDigitalTwins() .addWithResponseAsync(digitalTwinId, digitalTwin, context) .flatMap( response -> { T genericResponse = mapper.convertValue(response.getValue(), klazz); DigitalTwinsResponseHeaders twinHeaders = mapper.convertValue(response.getDeserializedHeaders(), DigitalTwinsResponseHeaders.class); return Mono.just(new DigitalTwinsResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), genericResponse, twinHeaders)); }); } /** * Creates a relationship on a digital twin. 
* * @param digitalTwinId The Id of the source digital twin. * @param relationshipId The Id of the relationship to be created. * @param relationship The application/json relationship to be created. * @return A REST response containing the application/json relationship created. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship) { return withContext(context -> createRelationshipWithResponse(digitalTwinId, relationshipId, relationship, context)); } Mono<Response<String>> createRelationshipWithResponse(String digitalTwinId, String relationshipId, String relationship, Context context) { return protocolLayer .getDigitalTwins() .addRelationshipWithResponseAsync(digitalTwinId, relationshipId, relationship, context) .flatMap( response -> { try { String jsonResponse = mapper.writeValueAsString(response.getValue()); return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), jsonResponse)); } catch (JsonProcessingException e) { return Mono.error(e); } }); } /** * Gets all the relationships on a digital twin filtered by the relationship name, by iterating through a collection. * * @param digitalTwinId The Id of the source digital twin. * @param relationshipName The name of a relationship to filter to. * @return A {@link PagedFlux} of application/json relationships belonging to the specified digital twin and the http response. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName) { return new PagedFlux<>( () -> withContext(context -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context)), nextLink -> withContext(context -> listRelationshipsNextSinglePageAsync(nextLink, context))); } PagedFlux<String> listRelationships(String digitalTwinId, String relationshipName, Context context) { return new PagedFlux<>( () -> listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context), nextLink -> listRelationshipsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<String>> listRelationshipsSinglePageAsync(String digitalTwinId, String relationshipName, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsSinglePageAsync(digitalTwinId, relationshipName, context) .map( objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; } }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase) objectPagedResponse).getDeserializedHeaders()); } ); } Mono<PagedResponse<String>> listRelationshipsNextSinglePageAsync(String nextLink, Context context) { return protocolLayer.getDigitalTwins().listRelationshipsNextSinglePageAsync(nextLink, context) .map(objectPagedResponse -> { List<String> stringList = objectPagedResponse.getValue().stream() .map(object -> { try { return mapper.writeValueAsString(object); } catch (JsonProcessingException e) { logger.error("Could not parse the returned relationship [%s]: %s", object, e); return null; 
} }) .filter(Objects::nonNull) .collect(Collectors.toList()); return new PagedResponseBase<>( objectPagedResponse.getRequest(), objectPagedResponse.getStatusCode(), objectPagedResponse.getHeaders(), stringList, objectPagedResponse.getContinuationToken(), ((PagedResponseBase)objectPagedResponse).getDeserializedHeaders()); }); } /** * Creates one or many models. * @param models The list of models to create. Each string corresponds to exactly one model. * @return A {@link PagedFlux} of created models and the http response. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> createModels(List<String> models) { return new PagedFlux<>( () -> withContext(context -> createModelsSinglePageAsync(models, context)), nextLink -> withContext(context -> Mono.empty())); } PagedFlux<ModelData> createModels(List<String> models, Context context){ return new PagedFlux<>( () -> createModelsSinglePageAsync(models, context), nextLink -> Mono.empty()); } Mono<PagedResponse<ModelData>> createModelsSinglePageAsync(List<String> models, Context context) { List<Object> modelsPayload = new ArrayList<>(); for (String model: models) { try { modelsPayload.add(mapper.readValue(model, Object.class)); } catch (JsonProcessingException e) { logger.error("Could not parse the model payload [%s]: %s", model, e); return Mono.error(e); } } return protocolLayer.getDigitalTwinModels().addWithResponseAsync(modelsPayload, context) .map( listResponse -> new PagedResponseBase<>( listResponse.getRequest(), listResponse.getStatusCode(), listResponse.getHeaders(), listResponse.getValue(), null, ((ResponseBase)listResponse).getDeserializedHeaders())); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. 
* @return The ModelData */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ModelData> getModel(String modelId) { return withContext(context -> getModelWithResponse(modelId, context)) .flatMap(response -> Mono.just(response.getValue())); } /** * Gets a model, including the model metadata and the model definition. * @param modelId The Id of the model. * @return The ModelData and the http response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<ModelData>> getModelWithResponse(String modelId) { return withContext(context -> getModelWithResponse(modelId, context)); } Mono<Response<ModelData>> getModelWithResponse(String modelId, Context context){ return protocolLayer .getDigitalTwinModels() .getByIdWithResponseAsync(modelId, includeModelDefinition, context); } /** * Gets the list of models by iterating through a collection. * @param listModelOptions The options to follow when listing the models. For example, the page size hint can be specified. * @return A {@link PagedFlux} of ModelData and the http response. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels(ListModelOptions listModelOptions) { return new PagedFlux<>( () -> withContext(context -> listModelsSinglePageAsync(listModelOptions, context)), nextLink -> withContext(context -> listModelsNextSinglePageAsync(nextLink, context))); } /** * Gets the list of models by iterating through a collection. * @return A {@link PagedFlux} of ModelData and the http response. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<ModelData> listModels() { return listModels(new ListModelOptions()); } PagedFlux<ModelData> listModels(Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(new ListModelOptions(), context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } PagedFlux<ModelData> listModels(ListModelOptions listModelOptions, Context context){ return new PagedFlux<>( () -> listModelsSinglePageAsync(listModelOptions, context), nextLink -> listModelsNextSinglePageAsync(nextLink, context)); } Mono<PagedResponse<ModelData>> listModelsSinglePageAsync(ListModelOptions listModelOptions, Context context){ return protocolLayer.getDigitalTwinModels().listSinglePageAsync( listModelOptions.getDependenciesFor(), listModelOptions.getIncludeModelDefinition(), /** * Deletes a model. * @param modelId The Id for the model. The Id is globally unique and case sensitive. * @return An empty Mono */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteModel(String modelId) { return withContext(context -> deleteModelWithResponse(modelId, context)) .flatMap(response -> Mono.just(response.getValue())); } /** * Deletes a model. * @param modelId The Id for the model. The Id is globally unique and case sensitive. * @return The http response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteModelWithResponse(String modelId) { return withContext(context -> deleteModelWithResponse(modelId, context)); } Mono<Response<Void>> deleteModelWithResponse(String modelId, Context context){ return protocolLayer.getDigitalTwinModels().deleteWithResponseAsync(modelId, context); } }
So is `Context.NONE` treated as an empty context here?
/**
 * Sample entry point: creates a relationship between two digital twins, then lists the
 * relationships on the source twin and prints each page's status code and every item.
 *
 * All configuration (AAD credentials, service endpoint, twin Ids, relationship Id and
 * payload) is read from environment variables.
 *
 * @param args Unused command-line arguments.
 * @throws JsonProcessingException Declared for the JSON (de)serialization performed by the client calls.
 */
public static void main(String[] args) throws JsonProcessingException {
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String sourceDigitalTwinId = System.getenv("SOURCE_DIGITAL_TWIN_ID");
    // NOTE(review): sourceDigitalTwin, targetDigitalTwinId and targetDigitalTwin are read but
    // never used below — presumably leftovers from a larger sample; confirm before removing.
    String sourceDigitalTwin = System.getenv("SOURCE_DIGITAL_TWIN");
    String targetDigitalTwinId = System.getenv("TARGET_DIGITAL_TWIN_ID");
    String targetDigitalTwin = System.getenv("TARGET_DIGITAL_TWIN");
    String relationshipId = System.getenv("RELATIONSHIP_ID");
    String relationship = System.getenv("RELATIONSHIP");

    // Service-principal (client-credential) authentication.
    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .buildClient();

    // Create the relationship and unwrap the service response payload.
    String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
    System.out.println("Created relationship: " + createdRelationship);

    // NOTE(review): the second argument of listRelationships filters by relationship *name*,
    // but relationshipId is passed here — verify this is intentional.
    PagedIterable<String> relationships = client.listRelationships(sourceDigitalTwinId, relationshipId, Context.NONE);

    // Iterate the same result twice to demonstrate both paging APIs (stream and iterable).
    relationships
        .streamByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });

    relationships
        .iterableByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });
}
String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
/**
 * Sample entry point: creates a relationship between two digital twins, then lists the
 * relationships on the source twin and prints each page's status code and every item.
 *
 * All configuration (AAD credentials, service endpoint, twin Ids, relationship Id and
 * payload) is read from environment variables.
 *
 * @param args Unused command-line arguments.
 * @throws JsonProcessingException Declared for the JSON (de)serialization performed by the client calls.
 */
public static void main(String[] args) throws JsonProcessingException {
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String sourceDigitalTwinId = System.getenv("SOURCE_DIGITAL_TWIN_ID");
    // NOTE(review): sourceDigitalTwin, targetDigitalTwinId and targetDigitalTwin are read but
    // never used below — presumably leftovers from a larger sample; confirm before removing.
    String sourceDigitalTwin = System.getenv("SOURCE_DIGITAL_TWIN");
    String targetDigitalTwinId = System.getenv("TARGET_DIGITAL_TWIN_ID");
    String targetDigitalTwin = System.getenv("TARGET_DIGITAL_TWIN");
    String relationshipId = System.getenv("RELATIONSHIP_ID");
    String relationship = System.getenv("RELATIONSHIP");

    // Service-principal (client-credential) authentication.
    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .buildClient();

    // Create the relationship and unwrap the service response payload.
    String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
    System.out.println("Created relationship: " + createdRelationship);

    // NOTE(review): the second argument of listRelationships filters by relationship *name*,
    // but relationshipId is passed here — verify this is intentional.
    PagedIterable<String> relationships = client.listRelationships(sourceDigitalTwinId, relationshipId, Context.NONE);

    // Iterate the same result twice to demonstrate both paging APIs (stream and iterable).
    relationships
        .streamByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });

    relationships
        .iterableByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });
}
// NOTE(review): empty placeholder — presumably the synchronous-client sample class still to be
// implemented; confirm whether this stub is intentional.
class SyncSample { }
// NOTE(review): empty placeholder — presumably the synchronous-client sample class still to be
// implemented; confirm whether this stub is intentional.
class SyncSample { }
yes `public static final Context NONE = new Context((Context)null, (Object)null, (Object)null);`
/**
 * Sample entry point: creates a relationship between two digital twins, then lists the
 * relationships on the source twin and prints each page's status code and every item.
 *
 * All configuration (AAD credentials, service endpoint, twin Ids, relationship Id and
 * payload) is read from environment variables.
 *
 * @param args Unused command-line arguments.
 * @throws JsonProcessingException Declared for the JSON (de)serialization performed by the client calls.
 */
public static void main(String[] args) throws JsonProcessingException {
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String sourceDigitalTwinId = System.getenv("SOURCE_DIGITAL_TWIN_ID");
    // NOTE(review): sourceDigitalTwin, targetDigitalTwinId and targetDigitalTwin are read but
    // never used below — presumably leftovers from a larger sample; confirm before removing.
    String sourceDigitalTwin = System.getenv("SOURCE_DIGITAL_TWIN");
    String targetDigitalTwinId = System.getenv("TARGET_DIGITAL_TWIN_ID");
    String targetDigitalTwin = System.getenv("TARGET_DIGITAL_TWIN");
    String relationshipId = System.getenv("RELATIONSHIP_ID");
    String relationship = System.getenv("RELATIONSHIP");

    // Service-principal (client-credential) authentication.
    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .buildClient();

    // Create the relationship and unwrap the service response payload.
    String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
    System.out.println("Created relationship: " + createdRelationship);

    // NOTE(review): the second argument of listRelationships filters by relationship *name*,
    // but relationshipId is passed here — verify this is intentional.
    PagedIterable<String> relationships = client.listRelationships(sourceDigitalTwinId, relationshipId, Context.NONE);

    // Iterate the same result twice to demonstrate both paging APIs (stream and iterable).
    relationships
        .streamByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });

    relationships
        .iterableByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });
}
String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
/**
 * Sample entry point: creates a relationship between two digital twins, then lists the
 * relationships on the source twin and prints each page's status code and every item.
 *
 * All configuration (AAD credentials, service endpoint, twin Ids, relationship Id and
 * payload) is read from environment variables.
 *
 * @param args Unused command-line arguments.
 * @throws JsonProcessingException Declared for the JSON (de)serialization performed by the client calls.
 */
public static void main(String[] args) throws JsonProcessingException {
    String tenantId = System.getenv("TENANT_ID");
    String clientId = System.getenv("CLIENT_ID");
    String clientSecret = System.getenv("CLIENT_SECRET");
    String endpoint = System.getenv("DIGITAL_TWINS_ENDPOINT");
    String sourceDigitalTwinId = System.getenv("SOURCE_DIGITAL_TWIN_ID");
    // NOTE(review): sourceDigitalTwin, targetDigitalTwinId and targetDigitalTwin are read but
    // never used below — presumably leftovers from a larger sample; confirm before removing.
    String sourceDigitalTwin = System.getenv("SOURCE_DIGITAL_TWIN");
    String targetDigitalTwinId = System.getenv("TARGET_DIGITAL_TWIN_ID");
    String targetDigitalTwin = System.getenv("TARGET_DIGITAL_TWIN");
    String relationshipId = System.getenv("RELATIONSHIP_ID");
    String relationship = System.getenv("RELATIONSHIP");

    // Service-principal (client-credential) authentication.
    TokenCredential tokenCredential = new ClientSecretCredentialBuilder()
        .tenantId(tenantId)
        .clientId(clientId)
        .clientSecret(clientSecret)
        .build();

    DigitalTwinsClient client = new DigitalTwinsClientBuilder()
        .tokenCredential(tokenCredential)
        .endpoint(endpoint)
        .buildClient();

    // Create the relationship and unwrap the service response payload.
    String createdRelationship = client.createRelationshipWithResponse(sourceDigitalTwinId, relationshipId, relationship, Context.NONE).getValue();
    System.out.println("Created relationship: " + createdRelationship);

    // NOTE(review): the second argument of listRelationships filters by relationship *name*,
    // but relationshipId is passed here — verify this is intentional.
    PagedIterable<String> relationships = client.listRelationships(sourceDigitalTwinId, relationshipId, Context.NONE);

    // Iterate the same result twice to demonstrate both paging APIs (stream and iterable).
    relationships
        .streamByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });

    relationships
        .iterableByPage()
        .forEach(page -> {
            System.out.println("Response headers status code is " + page.getStatusCode());
            page.getValue().forEach(item -> System.out.println("Relationship retrieved: " + item));
        });
}
// NOTE(review): empty placeholder — presumably the synchronous-client sample class still to be
// implemented; confirm whether this stub is intentional.
class SyncSample { }
// NOTE(review): empty placeholder — presumably the synchronous-client sample class still to be
// implemented; confirm whether this stub is intentional.
class SyncSample { }
The placeholder for these is `{}` rather than `%s`
/**
 * Converts a single field of a parsed table-entity JSON object into its Java representation.
 *
 * Regular data fields are checked for an accompanying {@code <name>@odata.type} annotation:
 * {@code Edm.DateTime} values are parsed into {@link OffsetDateTime}; unknown types are logged
 * and returned as plain text. Metadata keys and the annotation keys themselves fall through to
 * the plain-text return at the bottom.
 *
 * @param parentNode The JSON object containing the field.
 * @param fieldName The name of the field to convert.
 * @return The converted value, or the field's raw text when no conversion applies.
 * @throws IllegalArgumentException If an {@code Edm.DateTime}-annotated value fails to parse.
 */
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) {
    JsonNode valueNode = parentNode.get(fieldName);
    if (!TablesConstants.METADATA_KEYS.contains(fieldName) && !fieldName.endsWith("@odata.type")) {
        JsonNode typeNode = parentNode.get(fieldName + "@odata.type");
        if (typeNode != null) {
            String type = typeNode.asText();
            switch (type) {
                case "Edm.DateTime":
                    try {
                        return OffsetDateTime.parse(valueNode.asText());
                    } catch (DateTimeParseException e) {
                        // NOTE(review): the message names TIMESTAMP_KEY even though the failing
                        // field is fieldName — confirm whether that is intentional.
                        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                            "'%s' value is not a valid OffsetDateTime.", TablesConstants.TIMESTAMP_KEY), e));
                    }
                default:
                    // FIX: ClientLogger's varargs overloads do not interpret printf-style '%s'
                    // placeholders, so the message must be pre-formatted with String.format
                    // before being handed to the logger.
                    logger.warning(String.format("'%s' value has unknown OData type '%s'", fieldName, type));
                    break;
            }
        }
    }
    // Default: return the field's raw text content.
    return valueNode.asText();
}
default:
/**
 * Converts a single field of a parsed table-entity JSON object into its Java representation.
 *
 * Regular data fields are checked for an accompanying {@code <name>@odata.type} annotation:
 * {@code Edm.DateTime} values are parsed into {@link OffsetDateTime}; unknown types are logged
 * and returned as plain text. Metadata keys and the annotation keys themselves fall through to
 * the plain-text return at the bottom.
 *
 * @param parentNode The JSON object containing the field.
 * @param fieldName The name of the field to convert.
 * @return The converted value, or the field's raw text when no conversion applies.
 * @throws IllegalArgumentException If an {@code Edm.DateTime}-annotated value fails to parse.
 */
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) {
    JsonNode valueNode = parentNode.get(fieldName);
    if (!TablesConstants.METADATA_KEYS.contains(fieldName) && !fieldName.endsWith("@odata.type")) {
        JsonNode typeNode = parentNode.get(fieldName + "@odata.type");
        if (typeNode != null) {
            String type = typeNode.asText();
            switch (type) {
                case "Edm.DateTime":
                    try {
                        return OffsetDateTime.parse(valueNode.asText());
                    } catch (DateTimeParseException e) {
                        // NOTE(review): the message names TIMESTAMP_KEY even though the failing
                        // field is fieldName — confirm whether that is intentional.
                        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                            "'%s' value is not a valid OffsetDateTime.", TablesConstants.TIMESTAMP_KEY), e));
                    }
                default:
                    // Unknown OData type: warn (pre-formatted, since ClientLogger does not
                    // interpret '%s' placeholders) and fall back to the raw text value below.
                    logger.warning(String.format("'%s' value has unknown OData type '%s'", fieldName, type));
                    break;
            }
        }
    }
    // Default: return the field's raw text content.
    return valueNode.asText();
}
/**
 * Jackson-based serializer adapter that adds custom deserialization for
 * {@code TableEntityQueryResponse} payloads; all other types are delegated to
 * {@link JacksonAdapter}.
 */
class TablesJacksonSerializer extends JacksonAdapter {
    private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class);

    /** Routes String payloads through the InputStream overload so both paths share one code path. */
    @Override
    public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException {
        if (type == TableEntityQueryResponse.class) {
            return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding);
        } else {
            return super.deserialize(value, type, serializerEncoding);
        }
    }

    /** Intercepts TableEntityQueryResponse; everything else is handled by the base adapter. */
    @Override
    public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException {
        if (type == TableEntityQueryResponse.class) {
            return deserializeTableEntityQueryResponse(inputStream);
        } else {
            return super.deserialize(inputStream, type, serializerEncoding);
        }
    }

    /**
     * Parses a query response that is either a multi-entity payload (a "value" array) or a
     * single-entity payload (entity properties at the top level). Mixing both shapes is
     * rejected as an inconsistent response.
     */
    @SuppressWarnings("unchecked")
    private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException {
        String odataMetadata = null;
        List<Map<String, Object>> values = new ArrayList<>();
        final JsonNode node = super.serializer().readTree(inputStream);
        // Non-null only when top-level entity properties (single-entity shape) are seen.
        Map<String, Object> singleValue = null;
        for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) {
            final Map.Entry<String, JsonNode> entry = it.next();
            final String fieldName = entry.getKey();
            final JsonNode childNode = entry.getValue();
            if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) {
                odataMetadata = childNode.asText();
            } else if (fieldName.equals("value")) {
                if (childNode.isArray()) {
                    // Multi-entity response: each array element is one entity.
                    for (JsonNode childEntry : childNode) {
                        values.add(getEntityFieldsAsMap(childEntry));
                    }
                } else {
                    // A scalar "value" is treated as an ordinary property of a single entity.
                    if (singleValue == null) {
                        singleValue = new HashMap<>();
                    }
                    // getEntityFieldAsObject is defined elsewhere in this class (outside this view).
                    singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName));
                }
            } else {
                if (singleValue == null) {
                    singleValue = new HashMap<>();
                }
                singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName));
            }
        }
        if (singleValue != null) {
            // A response must be either a "value" array or a single entity, never both.
            if (values.size() > 0) {
                throw logger.logExceptionAsError(new IllegalStateException(
                    "Unexpected response format. Response containing a 'value' array must not contain other properties."
                ));
            }
            values.add(singleValue);
        }
        return (U) new TableEntityQueryResponse()
            .setOdataMetadata(odataMetadata)
            .setValue(values);
    }

    /** Converts every field of one entity node into a name-to-value map. */
    private Map<String, Object> getEntityFieldsAsMap(JsonNode node) {
        Map<String, Object> result = new HashMap<>();
        for (Iterator<String> it = node.fieldNames(); it.hasNext(); ) {
            String fieldName = it.next();
            result.put(fieldName, getEntityFieldAsObject(node, fieldName));
        }
        return result;
    }
}
/**
 * Jackson-based serializer adapter that adds custom deserialization for
 * {@code TableEntityQueryResponse} payloads; all other types are delegated to
 * {@link JacksonAdapter}.
 */
class TablesJacksonSerializer extends JacksonAdapter {
    private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class);

    /** Routes String payloads through the InputStream overload so both paths share one code path. */
    @Override
    public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException {
        if (type == TableEntityQueryResponse.class) {
            return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding);
        } else {
            return super.deserialize(value, type, serializerEncoding);
        }
    }

    /** Intercepts TableEntityQueryResponse; everything else is handled by the base adapter. */
    @Override
    public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException {
        if (type == TableEntityQueryResponse.class) {
            return deserializeTableEntityQueryResponse(inputStream);
        } else {
            return super.deserialize(inputStream, type, serializerEncoding);
        }
    }

    /**
     * Parses a query response that is either a multi-entity payload (a "value" array) or a
     * single-entity payload (entity properties at the top level). Mixing both shapes is
     * rejected as an inconsistent response.
     */
    @SuppressWarnings("unchecked")
    private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException {
        String odataMetadata = null;
        List<Map<String, Object>> values = new ArrayList<>();
        final JsonNode node = super.serializer().readTree(inputStream);
        // Non-null only when top-level entity properties (single-entity shape) are seen.
        Map<String, Object> singleValue = null;
        for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) {
            final Map.Entry<String, JsonNode> entry = it.next();
            final String fieldName = entry.getKey();
            final JsonNode childNode = entry.getValue();
            if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) {
                odataMetadata = childNode.asText();
            } else if (fieldName.equals("value")) {
                if (childNode.isArray()) {
                    // Multi-entity response: each array element is one entity.
                    for (JsonNode childEntry : childNode) {
                        values.add(getEntityFieldsAsMap(childEntry));
                    }
                } else {
                    // A scalar "value" is treated as an ordinary property of a single entity.
                    if (singleValue == null) {
                        singleValue = new HashMap<>();
                    }
                    // getEntityFieldAsObject is defined elsewhere in this class (outside this view).
                    singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName));
                }
            } else {
                if (singleValue == null) {
                    singleValue = new HashMap<>();
                }
                singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName));
            }
        }
        if (singleValue != null) {
            // A response must be either a "value" array or a single entity, never both.
            if (values.size() > 0) {
                throw logger.logExceptionAsError(new IllegalStateException(
                    "Unexpected response format. Response containing a 'value' array must not contain other properties."
                ));
            }
            values.add(singleValue);
        }
        return (U) new TableEntityQueryResponse()
            .setOdataMetadata(odataMetadata)
            .setValue(values);
    }

    /** Converts every field of one entity node into a name-to-value map. */
    private Map<String, Object> getEntityFieldsAsMap(JsonNode node) {
        Map<String, Object> result = new HashMap<>();
        for (Iterator<String> it = node.fieldNames(); it.hasNext();) {
            String fieldName = it.next();
            result.put(fieldName, getEntityFieldAsObject(node, fieldName));
        }
        return result;
    }
}
You can use the following, this is like Task.whenAll that completes when all three are done. ```java Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3))).block(TIMEOUT); ```
/**
 * Verifies that listEntities honors the 'top' page-size option: with top=2 and three entities
 * in the table, the first page yields two items; the remainder of the stream is then drained.
 */
void listEntitiesWithTopAsync() {
    // Arrange — namer calls kept in this order so record/playback stays deterministic.
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);

    // IMPROVED: create the three entities concurrently and block once for all of them
    // (Mono.when completes when every inner Mono completes), instead of blocking three
    // times sequentially.
    Mono.when(
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3))
    ).block(TIMEOUT);

    // Act & Assert — expect the first 2 entities, then consume any remaining emissions.
    StepVerifier.create(tableClient.listEntities(options))
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify();
}
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
/**
 * Verifies that listEntities honors the 'top' page-size option: with top=2 and three entities
 * in the table, the first page yields two items; the remainder of the stream is then drained.
 */
void listEntitiesWithTopAsync() {
    // Arrange — namer calls kept in this order so record/playback stays deterministic.
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);

    // Create the three entities concurrently; Mono.when completes only when every inner
    // Mono completes, so a single block() suffices.
    Mono.when(
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3))
    ).block(TIMEOUT);

    // Act & Assert — expect the first 2 entities, then consume any remaining emissions.
    StepVerifier.create(tableClient.listEntities(options))
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify();
}
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, 
rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) ).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) 
).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
Same with those other instances.
void listEntitiesWithTopAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setTop(2); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); }
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
void listEntitiesWithTopAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setTop(2); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)) ).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); }
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, 
rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) ).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) 
).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
Fixed: the warning message is now pre-formatted with String.format before being passed to logger.warning, so the '%s' placeholders are actually substituted with the field name and OData type.
/**
 * Resolves the value of {@code fieldName} on {@code parentNode}, honouring any accompanying
 * {@code fieldName@odata.type} annotation.
 *
 * @param parentNode the JSON node containing the field and its optional type annotation.
 * @param fieldName the name of the field to resolve.
 * @return an {@link OffsetDateTime} when the field is annotated as {@code Edm.DateTime};
 * otherwise the field's text value. Metadata keys and the annotation fields themselves are
 * always returned as text.
 * @throws IllegalArgumentException if an {@code Edm.DateTime}-annotated value cannot be parsed.
 */
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) {
    JsonNode valueNode = parentNode.get(fieldName);

    // Metadata keys and the "@odata.type" annotation fields themselves fall through to the
    // plain-text return at the bottom.
    if (!TablesConstants.METADATA_KEYS.contains(fieldName) && !fieldName.endsWith("@odata.type")) {
        JsonNode typeNode = parentNode.get(fieldName + "@odata.type");
        if (typeNode != null) {
            String type = typeNode.asText();
            switch (type) {
                case "Edm.DateTime":
                    try {
                        return OffsetDateTime.parse(valueNode.asText());
                    } catch (DateTimeParseException e) {
                        throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                            "'%s' value is not a valid OffsetDateTime.", TablesConstants.TIMESTAMP_KEY), e));
                    }
                default:
                    // Pre-format the message: passing printf-style arguments alongside the
                    // format string left the '%s' placeholders unsubstituted in the log output.
                    logger.warning(String.format("'%s' value has unknown OData type '%s'", fieldName, type));
                    break;
            }
        }
    }

    return valueNode.asText();
}
default:
/**
 * Resolves the value of {@code fieldName} on {@code parentNode}, honouring any accompanying
 * {@code fieldName@odata.type} annotation.
 *
 * @param parentNode the JSON node containing the field and its optional type annotation.
 * @param fieldName the name of the field to resolve.
 * @return an {@link OffsetDateTime} when the field is annotated as {@code Edm.DateTime};
 * otherwise the field's text value.
 * @throws IllegalArgumentException if an {@code Edm.DateTime}-annotated value cannot be parsed.
 */
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) {
    final JsonNode valueNode = parentNode.get(fieldName);

    // Metadata keys and the annotation fields themselves are never type-converted.
    final boolean isMetadataOrAnnotation =
        TablesConstants.METADATA_KEYS.contains(fieldName) || fieldName.endsWith("@odata.type");

    if (!isMetadataOrAnnotation) {
        final JsonNode typeNode = parentNode.get(fieldName + "@odata.type");
        if (typeNode != null) {
            final String type = typeNode.asText();
            if ("Edm.DateTime".equals(type)) {
                try {
                    return OffsetDateTime.parse(valueNode.asText());
                } catch (DateTimeParseException e) {
                    throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
                        "'%s' value is not a valid OffsetDateTime.", TablesConstants.TIMESTAMP_KEY), e));
                }
            }
            // Any other annotated type is unrecognised; log and fall back to the text value.
            logger.warning(String.format("'%s' value has unknown OData type '%s'", fieldName, type));
        }
    }

    return valueNode.asText();
}
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } @SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected 
response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext(); ) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); } return result; } }
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } @SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected 
response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext();) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); } return result; } }
Fixed: the three createEntity calls are now combined into a single publisher that is blocked once, so all entities are guaranteed to exist before listEntities runs and the setup shares one TIMEOUT budget.
/**
 * Verifies that listEntities honours the {@code top} option: three entities are created in the
 * same partition, and the query with {@code setTop(2)} is expected to emit the first two before
 * any remainder is drained.
 */
void listEntitiesWithTopAsync() {
    // Arrange: one partition, three distinct row keys.
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);

    // Compose the three creates into one chain and block once, so the whole setup shares a
    // single TIMEOUT budget instead of three independent ones.
    tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue))
        .then(tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)))
        .then(tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)))
        .block(TIMEOUT);

    // Act & Assert: expect the first two entities, then drain whatever remains.
    StepVerifier.create(tableClient.listEntities(options))
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify();
}
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
// Verifies that listEntities honours the 'top' option: three entities are created in the same
// partition, and the query with setTop(2) is expected to emit the first two before any
// remainder is drained.
void listEntitiesWithTopAsync() {
    // Arrange: one partition, three distinct row keys.
    final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
    final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
    final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
    ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);

    // Wait for all three creates to complete (single block with one TIMEOUT) before querying.
    Mono.when(
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)),
        tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3))
    ).block(TIMEOUT);

    // Act & Assert: expect the first two entities, then drain whatever remains.
    StepVerifier.create(tableClient.listEntities(options))
        .expectNextCount(2)
        .thenConsumeWhile(x -> true)
        .expectComplete()
        .verify();
}
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT); tableClient.createEntity(new TableEntity(partitionKeyValue, 
rowKeyValue2)).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
class TablesAsyncClientTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(30); private TableAsyncClient tableClient; private HttpPipelinePolicy recordPolicy; private HttpClient playbackClient; @Override protected void beforeTest() { final String tableName = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName); if (interceptorManager.isPlaybackMode()) { playbackClient = interceptorManager.getPlaybackClient(); builder.httpClient(playbackClient); } else { recordPolicy = interceptorManager.getRecordPolicy(); builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } tableClient = builder.buildAsyncClient(); tableClient.create().block(TIMEOUT); } @Test void createTableAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); StepVerifier.create(asyncClient2.create()) .expectComplete() .verify(); } @Test void createTableWithResponseAsync() { final String tableName2 = testResourceNamer.randomName("tableName", 20); final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode()); final 
TableClientBuilder builder = new TableClientBuilder() .connectionString(connectionString) .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .tableName(tableName2); if (interceptorManager.isPlaybackMode()) { builder.httpClient(playbackClient); } else { builder.httpClient(HttpClient.createDefault()) .addPolicy(recordPolicy) .addPolicy(new RetryPolicy()); } final TableAsyncClient asyncClient2 = builder.buildAsyncClient(); final int expectedStatusCode = 204; StepVerifier.create(asyncClient2.createWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void createEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); StepVerifier.create(tableClient.createEntity(tableEntity)) .expectComplete() .verify(); } @Test void createEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; StepVerifier.create(tableClient.createEntityWithResponse(entity)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteTableAsync() { StepVerifier.create(tableClient.delete()) .expectComplete() .verify(); } @Test void deleteTableWithResponseAsync() { final int expectedStatusCode = 204; StepVerifier.create(tableClient.deleteWithResponse()) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 
20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue)) .expectComplete() .verify(); } @Test void deleteEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, null)) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void deleteEntityWithResponseMatchETagAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 204; tableClient.createEntity(tableEntity).block(TIMEOUT); final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should 
not be null."); StepVerifier.create(tableClient.deleteEntityWithResponse(partitionKeyValue, rowKeyValue, createdEntity.getETag())) .assertNext(response -> { assertEquals(expectedStatusCode, response.getStatusCode()); }) .expectComplete() .verify(); } @Test void getEntityWithResponseAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue); final int expectedStatusCode = 200; tableClient.createEntity(tableEntity).block(TIMEOUT); StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue)) .assertNext(response -> { final TableEntity entity = response.getValue(); assertEquals(expectedStatusCode, response.getStatusCode()); assertNotNull(entity); assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey()); assertEquals(tableEntity.getRowKey(), entity.getRowKey()); assertNotNull(entity.getTimestamp()); assertNotNull(entity.getETag()); assertNotNull(entity.getProperties()); }) .expectComplete() .verify(); } @Test void updateEntityWithResponseReplaceAsync() { updateEntityWithResponseAsync(UpdateMode.REPLACE); } @Test void updateEntityWithResponseMergeAsync() { updateEntityWithResponseAsync(UpdateMode.MERGE); } /** * In the case of {@link UpdateMode * In the case of {@link UpdateMode */ void updateEntityWithResponseAsync(UpdateMode mode) { final boolean expectOldProperty = mode == UpdateMode.MERGE; final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20); final int expectedStatusCode = 204; final String oldPropertyKey = "propertyA"; final String newPropertyKey = "propertyB"; final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty(oldPropertyKey, "valueA"); tableClient.createEntity(tableEntity).block(TIMEOUT); final 
TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT); assertNotNull(createdEntity, "'createdEntity' should not be null."); assertNotNull(createdEntity.getETag(), "'eTag' should not be null."); createdEntity.getProperties().remove(oldPropertyKey); createdEntity.addProperty(newPropertyKey, "valueB"); StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, true, mode)) .assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode())) .expectComplete() .verify(); StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue)) .assertNext(entity -> { final Map<String, Object> properties = entity.getProperties(); assertTrue(properties.containsKey(newPropertyKey)); assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey)); }) .verifyComplete(); } @Test @Tag("ListEntities") void listEntitiesAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) ).block(TIMEOUT); StepVerifier.create(tableClient.listEntities()) .expectNextCount(2) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithFilterAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20); ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'"); Mono.when( tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)), tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)) 
).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertEquals(partitionKeyValue, returnEntity.getPartitionKey()); assertEquals(rowKeyValue, returnEntity.getRowKey()); }) .expectNextCount(0) .thenConsumeWhile(x -> true) .expectComplete() .verify(); } @Test @Tag("ListEntities") void listEntitiesWithSelectAsync() { final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20); final String rowKeyValue = testResourceNamer.randomName("rowKey", 20); final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue) .addProperty("propertyC", "valueC") .addProperty("propertyD", "valueD"); ListEntitiesOptions options = new ListEntitiesOptions() .setSelect("propertyC"); tableClient.createEntity(entity).block(TIMEOUT); StepVerifier.create(tableClient.listEntities(options)) .assertNext(returnEntity -> { assertNull(returnEntity.getRowKey()); assertNull(returnEntity.getPartitionKey()); assertEquals("valueC", returnEntity.getProperties().get("propertyC")); assertNull(returnEntity.getProperties().get("propertyD")); }) .expectComplete() .verify(); } @Test @Tag("ListEntities") }
Why none? Don't we have some parameter user's set?
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount, maxAutoLockRenewalDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration, sessionId, isRollingSessionReceiver(), maxConcurrentSessions); if (CoreUtils.isNullOrEmpty(sessionId)) { final UnnamedSessionManager sessionManager = new UnnamedSessionManager(entityPath, entityType, connectionProcessor, connectionProcessor.getRetryOptions().getTryTimeout(), tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } else { return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
SubQueue.NONE);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount, maxAutoLockRenewalDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration, sessionId, isRollingSessionReceiver(), maxConcurrentSessions); if (CoreUtils.isNullOrEmpty(sessionId)) { final UnnamedSessionManager sessionManager = new UnnamedSessionManager(entityPath, entityType, connectionProcessor, connectionProcessor.getRetryOptions().getTryTimeout(), tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } else { return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. 
* * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. 
* * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
`final Long[] longs` to be consistent.
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers, String associatedLinkName) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, longs))); return sendWithVerify(channel, requestMessage, null); })).then(); }
Long[] longs = numbers.toArray(new Long[0]);
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers, String associatedLinkName) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); final Long[] longs = numbers.toArray(new Long[0]); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, longs))); return sendWithVerify(channel, requestMessage, null); })).then(); }
class ManagementChannel implements ServiceBusManagementNode { private final MessageSerializer messageSerializer; private final TokenManager tokenManager; private final Duration operationTimeout; private final Mono<RequestResponseChannel> createChannel; private final String fullyQualifiedNamespace; private final ClientLogger logger; private final String entityPath; private volatile boolean isDisposed; ManagementChannel(Mono<RequestResponseChannel> createChannel, String fullyQualifiedNamespace, String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) { this.createChannel = Objects.requireNonNull(createChannel, "'createChannel' cannot be null."); this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath)); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); } /** * {@inheritDoc} */ @Override public Mono<Void> cancelScheduledMessage(long sequenceNumber, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, new Long[]{sequenceNumber}))); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { 
transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Mono<byte[]> getSessionState(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_GET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_GET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).flatMap(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { return monoError(logger, Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object sessionState = map.get(ManagementConstants.SESSION_STATE); if (sessionState == null) { logger.info("sessionId[{}]. 
Does not have a session state.", sessionId); return Mono.empty(); } final byte[] state = ((Binary) sessionState).getArray(); return Mono.just(state); }); } /** * {@inheritDoc} */ @Override public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName) { return peek(fromSequenceNumber, sessionId, associatedLinkName, 1) .last(); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName, int maxMessages) { return isAuthorized(OPERATION_PEEK).thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_PEEK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.FROM_SEQUENCE_NUMBER, fromSequenceNumber); requestBody.put(ManagementConstants.MESSAGE_COUNT_KEY, maxMessages); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, message, null); }).flatMapMany(response -> { final List<ServiceBusReceivedMessage> messages = messageSerializer.deserializeList(response, ServiceBusReceivedMessage.class); return Flux.fromIterable(messages); })); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(ReceiveMode receiveMode, String sessionId, String associatedLinkName, Iterable<Long> sequenceNumbers) { return isAuthorized(ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER) .thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage( ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER, associatedLinkName); final Map<String, Object> requestBodyMap = new HashMap<>(); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); 
requestBodyMap.put(ManagementConstants.SEQUENCE_NUMBERS, longs); requestBodyMap.put(ManagementConstants.RECEIVER_SETTLE_MODE, UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1)); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBodyMap.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBodyMap)); return sendWithVerify(channel, message, null); }).flatMapMany(amqpMessage -> { final List<ServiceBusReceivedMessage> messageList = messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class); return Flux.fromIterable(messageList); })); } /** * {@inheritDoc} */ @Override public Mono<Instant> renewMessageLock(String lockToken, String associatedLinkName) { return isAuthorized(OPERATION_PEEK).then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage(ManagementConstants.OPERATION_RENEW_LOCK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, new UUID[]{UUID.fromString(lockToken)}); requestMessage.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, requestMessage, null); }).map(responseMessage -> { final List<Instant> renewTimeList = messageSerializer.deserializeList(responseMessage, Instant.class); if (CoreUtils.isNullOrEmpty(renewTimeList)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Service bus response empty. 
Could not renew message with lock token: '%s'.", lockToken), getErrorContext()))); } return renewTimeList.get(0); })); } @Override public Mono<Instant> renewSessionLock(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_RENEW_SESSION_LOCK).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_RENEW_SESSION_LOCK, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).map(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object expirationValue = map.get(ManagementConstants.EXPIRATION); if (!(expirationValue instanceof Date)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Expiration is not of type Date when renewing session. Id: %s. 
Value: %s", sessionId, expirationValue), getErrorContext()))); } return ((Date) expirationValue).toInstant(); }); } /** * {@inheritDoc} */ @Override public Flux<Long> schedule(final List<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, int maxLinkSize, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(OPERATION_SCHEDULE_MESSAGE).thenMany(createChannel.flatMap(channel -> { final Collection<Map<String, Object>> messageList = new LinkedList<>(); for (ServiceBusMessage message : messages) { message.setScheduledEnqueueTime(scheduledEnqueueTime); final Message amqpMessage = messageSerializer.serialize(message); final int payloadSize = messageSerializer.getSize(amqpMessage); final int allocationSize = Math.min(payloadSize + ManagementConstants.MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxLinkSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = amqpMessage.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format( "Error sending. 
Size of the payload exceeded maximum message size: %s kb", maxLinkSize / 1024); final AmqpErrorContext errorContext = channel.getErrorContext(); return monoError(logger, Exceptions.propagate(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, errorContext))); } final Map<String, Object> messageEntry = new HashMap<>(); messageEntry.put(ManagementConstants.MESSAGE, new Binary(bytes, 0, encodedSize)); messageEntry.put(ManagementConstants.MESSAGE_ID, amqpMessage.getMessageId()); final String sessionId = amqpMessage.getGroupId(); if (!CoreUtils.isNullOrEmpty(sessionId)) { messageEntry.put(ManagementConstants.SESSION_ID, sessionId); } final String partitionKey = message.getPartitionKey(); if (!CoreUtils.isNullOrEmpty(partitionKey)) { messageEntry.put(ManagementConstants.PARTITION_KEY, partitionKey); } final String viaPartitionKey = message.getViaPartitionKey(); if (!CoreUtils.isNullOrEmpty(viaPartitionKey)) { messageEntry.put(ManagementConstants.VIA_PARTITION_KEY, viaPartitionKey); } messageList.add(messageEntry); } final Map<String, Object> requestBodyMap = new HashMap<>(); requestBodyMap.put(ManagementConstants.MESSAGES, messageList); final Message requestMessage = createManagementMessage(OPERATION_SCHEDULE_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(requestBodyMap)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); }) .flatMapMany(response -> { final List<Long> sequenceNumbers = messageSerializer.deserializeList(response, Long.class); if (CoreUtils.isNullOrEmpty(sequenceNumbers)) { fluxError(logger, new AmqpException(false, String.format( "Service Bus response was empty. 
Could not schedule message()s."), getErrorContext())); } return Flux.fromIterable(sequenceNumbers); })); } @Override public Mono<Void> setSessionState(String sessionId, byte[] state, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_SET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_SET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); body.put(ManagementConstants.SESSION_STATE, state == null ? null : new Binary(state)); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null).then(); })); } @Override public Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, String sessionId, String associatedLinkName, ServiceBusTransactionContext transactionContext) { final UUID[] lockTokens = new UUID[]{UUID.fromString(lockToken)}; return isAuthorized(OPERATION_UPDATE_DISPOSITION).then(createChannel.flatMap(channel -> { logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'", Arrays.toString(lockTokens), dispositionStatus, entityPath, sessionId); final Message message = createManagementMessage(OPERATION_UPDATE_DISPOSITION, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, lockTokens); requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue()); if (deadLetterReason != null) { requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason); } if (deadLetterErrorDescription != 
null) { requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription); } if (propertiesToModify != null && propertiesToModify.size() > 0) { requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify); } if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, message, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override public void close() { if (isDisposed) { return; } isDisposed = true; tokenManager.close(); } private Mono<Message> sendWithVerify(RequestResponseChannel channel, Message message, DeliveryState deliveryState) { return channel.sendWithAck(message, deliveryState) .handle((Message response, SynchronousSink<Message> sink) -> { if (RequestResponseUtils.isSuccessful(response)) { sink.next(response); return; } final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(response); final String statusDescription = RequestResponseUtils.getStatusDescription(response); final String errorCondition = RequestResponseUtils.getErrorCondition(response); final Throwable throwable = MessageUtils.toException( new ErrorCondition(Symbol.getSymbol(errorCondition), statusDescription), channel.getErrorContext()); logger.warning("status[{}] description[{}] condition[{}] Operation not successful.", statusCode, statusDescription, errorCondition); sink.error(throwable); }) .switchIfEmpty(Mono.error(new AmqpException(true, "No response received from management channel.", channel.getErrorContext()))); } private Mono<Void> isAuthorized(String operation) { return tokenManager.getAuthorizationResults() .next() 
.handle((response, sink) -> { if (response != AmqpResponseCode.ACCEPTED && response != AmqpResponseCode.OK) { sink.error(new AmqpException(false, String.format( "User does not have authorization to perform operation [%s] on entity [%s]. Response: [%s]", operation, entityPath, response), getErrorContext())); } else { sink.complete(); } }); } /** * Creates an AMQP message with the required application properties. * * @param operation Management operation to perform (ie. peek, update-disposition, etc.) * @param associatedLinkName Name of the open receive link that first received the message. * * @return An AMQP message with the required headers. */ private Message createManagementMessage(String operation, String associatedLinkName) { final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put(ManagementConstants.MANAGEMENT_OPERATION_KEY, operation); applicationProperties.put(ManagementConstants.SERVER_TIMEOUT, serverTimeout.toMillis()); if (!CoreUtils.isNullOrEmpty(associatedLinkName)) { applicationProperties.put(ManagementConstants.ASSOCIATED_LINK_NAME_KEY, associatedLinkName); } final Message message = Proton.message(); message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(fullyQualifiedNamespace, entityPath); } }
class ManagementChannel implements ServiceBusManagementNode { private final MessageSerializer messageSerializer; private final TokenManager tokenManager; private final Duration operationTimeout; private final Mono<RequestResponseChannel> createChannel; private final String fullyQualifiedNamespace; private final ClientLogger logger; private final String entityPath; private volatile boolean isDisposed; ManagementChannel(Mono<RequestResponseChannel> createChannel, String fullyQualifiedNamespace, String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) { this.createChannel = Objects.requireNonNull(createChannel, "'createChannel' cannot be null."); this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath)); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Mono<byte[]> getSessionState(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_GET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_GET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); 
message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).flatMap(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { return monoError(logger, Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object sessionState = map.get(ManagementConstants.SESSION_STATE); if (sessionState == null) { logger.info("sessionId[{}]. Does not have a session state.", sessionId); return Mono.empty(); } final byte[] state = ((Binary) sessionState).getArray(); return Mono.just(state); }); } /** * {@inheritDoc} */ @Override public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName) { return peek(fromSequenceNumber, sessionId, associatedLinkName, 1) .next(); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName, int maxMessages) { return isAuthorized(OPERATION_PEEK).thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_PEEK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.FROM_SEQUENCE_NUMBER, fromSequenceNumber); requestBody.put(ManagementConstants.MESSAGE_COUNT_KEY, maxMessages); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, message, null); }).flatMapMany(response -> { final List<ServiceBusReceivedMessage> messages = messageSerializer.deserializeList(response, ServiceBusReceivedMessage.class); return Flux.fromIterable(messages); })); } /** * {@inheritDoc} */ @Override public 
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(ReceiveMode receiveMode, String sessionId, String associatedLinkName, Iterable<Long> sequenceNumbers) { return isAuthorized(ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER) .thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage( ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER, associatedLinkName); final Map<String, Object> requestBodyMap = new HashMap<>(); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); requestBodyMap.put(ManagementConstants.SEQUENCE_NUMBERS, longs); requestBodyMap.put(ManagementConstants.RECEIVER_SETTLE_MODE, UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1)); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBodyMap.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBodyMap)); return sendWithVerify(channel, message, null); }).flatMapMany(amqpMessage -> { final List<ServiceBusReceivedMessage> messageList = messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class); return Flux.fromIterable(messageList); })); } /** * {@inheritDoc} */ @Override public Mono<OffsetDateTime> renewMessageLock(String lockToken, String associatedLinkName) { return isAuthorized(OPERATION_PEEK).then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage(ManagementConstants.OPERATION_RENEW_LOCK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, new UUID[]{UUID.fromString(lockToken)}); requestMessage.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, requestMessage, null); }).map(responseMessage -> { final List<OffsetDateTime> renewTimeList = messageSerializer.deserializeList(responseMessage, OffsetDateTime.class); if (CoreUtils.isNullOrEmpty(renewTimeList)) { 
throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Service bus response empty. Could not renew message with lock token: '%s'.", lockToken), getErrorContext()))); } return renewTimeList.get(0); })); } @Override public Mono<OffsetDateTime> renewSessionLock(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_RENEW_SESSION_LOCK).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_RENEW_SESSION_LOCK, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).map(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object expirationValue = map.get(ManagementConstants.EXPIRATION); if (!(expirationValue instanceof Date)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Expiration is not of type Date when renewing session. Id: %s. 
Value: %s", sessionId, expirationValue), getErrorContext()))); } return ((Date) expirationValue).toInstant().atOffset(ZoneOffset.UTC); }); } /** * {@inheritDoc} */ @Override public Flux<Long> schedule(List<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, int maxLinkSize, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(OPERATION_SCHEDULE_MESSAGE).thenMany(createChannel.flatMap(channel -> { final Collection<Map<String, Object>> messageList = new LinkedList<>(); for (ServiceBusMessage message : messages) { message.setScheduledEnqueueTime(scheduledEnqueueTime); final Message amqpMessage = messageSerializer.serialize(message); final int payloadSize = messageSerializer.getSize(amqpMessage); final int allocationSize = Math.min(payloadSize + ManagementConstants.MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxLinkSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = amqpMessage.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format( "Error sending. 
Size of the payload exceeded maximum message size: %s kb", maxLinkSize / 1024); final AmqpErrorContext errorContext = channel.getErrorContext(); return monoError(logger, Exceptions.propagate(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, errorContext))); } final Map<String, Object> messageEntry = new HashMap<>(); messageEntry.put(ManagementConstants.MESSAGE, new Binary(bytes, 0, encodedSize)); messageEntry.put(ManagementConstants.MESSAGE_ID, amqpMessage.getMessageId()); final String sessionId = amqpMessage.getGroupId(); if (!CoreUtils.isNullOrEmpty(sessionId)) { messageEntry.put(ManagementConstants.SESSION_ID, sessionId); } final String partitionKey = message.getPartitionKey(); if (!CoreUtils.isNullOrEmpty(partitionKey)) { messageEntry.put(ManagementConstants.PARTITION_KEY, partitionKey); } final String viaPartitionKey = message.getViaPartitionKey(); if (!CoreUtils.isNullOrEmpty(viaPartitionKey)) { messageEntry.put(ManagementConstants.VIA_PARTITION_KEY, viaPartitionKey); } messageList.add(messageEntry); } final Map<String, Object> requestBodyMap = new HashMap<>(); requestBodyMap.put(ManagementConstants.MESSAGES, messageList); final Message requestMessage = createManagementMessage(OPERATION_SCHEDULE_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(requestBodyMap)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); }) .flatMapMany(response -> { final List<Long> sequenceNumbers = messageSerializer.deserializeList(response, Long.class); if (CoreUtils.isNullOrEmpty(sequenceNumbers)) { fluxError(logger, new AmqpException(false, String.format( "Service Bus response was empty. 
Could not schedule message()s."), getErrorContext())); } return Flux.fromIterable(sequenceNumbers); })); } @Override public Mono<Void> setSessionState(String sessionId, byte[] state, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_SET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_SET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); body.put(ManagementConstants.SESSION_STATE, state == null ? null : new Binary(state)); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null).then(); })); } @Override public Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, String sessionId, String associatedLinkName, ServiceBusTransactionContext transactionContext) { final UUID[] lockTokens = new UUID[]{UUID.fromString(lockToken)}; return isAuthorized(OPERATION_UPDATE_DISPOSITION).then(createChannel.flatMap(channel -> { logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'", Arrays.toString(lockTokens), dispositionStatus, entityPath, sessionId); final Message message = createManagementMessage(OPERATION_UPDATE_DISPOSITION, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, lockTokens); requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue()); if (deadLetterReason != null) { requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason); } if (deadLetterErrorDescription != 
null) { requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription); } if (propertiesToModify != null && propertiesToModify.size() > 0) { requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify); } if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, message, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override public void close() { if (isDisposed) { return; } isDisposed = true; tokenManager.close(); } private Mono<Message> sendWithVerify(RequestResponseChannel channel, Message message, DeliveryState deliveryState) { return channel.sendWithAck(message, deliveryState) .handle((Message response, SynchronousSink<Message> sink) -> { if (RequestResponseUtils.isSuccessful(response)) { sink.next(response); return; } final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(response); if (statusCode == AmqpResponseCode.NO_CONTENT) { sink.next(response); return; } final String errorCondition = RequestResponseUtils.getErrorCondition(response); if (statusCode == AmqpResponseCode.NOT_FOUND) { final AmqpErrorCondition amqpErrorCondition = AmqpErrorCondition.fromString(errorCondition); if (amqpErrorCondition == AmqpErrorCondition.MESSAGE_NOT_FOUND) { logger.info("There was no matching message found."); sink.next(response); return; } else if (amqpErrorCondition == AmqpErrorCondition.SESSION_NOT_FOUND) { logger.info("There was no matching session found."); sink.next(response); return; } } final String statusDescription = RequestResponseUtils.getStatusDescription(response); final Throwable throwable = 
ExceptionUtil.toException(errorCondition, statusDescription, channel.getErrorContext()); logger.warning("status[{}] description[{}] condition[{}] Operation not successful.", statusCode, statusDescription, errorCondition); sink.error(throwable); }) .switchIfEmpty(Mono.error(new AmqpException(true, "No response received from management channel.", channel.getErrorContext()))); } private Mono<Void> isAuthorized(String operation) { return tokenManager.getAuthorizationResults() .next() .handle((response, sink) -> { if (response != AmqpResponseCode.ACCEPTED && response != AmqpResponseCode.OK) { sink.error(new AmqpException(false, String.format( "User does not have authorization to perform operation [%s] on entity [%s]. Response: [%s]", operation, entityPath, response), getErrorContext())); } else { sink.complete(); } }); } /** * Creates an AMQP message with the required application properties. * * @param operation Management operation to perform (ie. peek, update-disposition, etc.) * @param associatedLinkName Name of the open receive link that first received the message. * * @return An AMQP message with the required headers. */ private Message createManagementMessage(String operation, String associatedLinkName) { final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put(ManagementConstants.MANAGEMENT_OPERATION_KEY, operation); applicationProperties.put(ManagementConstants.SERVER_TIMEOUT, serverTimeout.toMillis()); if (!CoreUtils.isNullOrEmpty(associatedLinkName)) { applicationProperties.put(ManagementConstants.ASSOCIATED_LINK_NAME_KEY, associatedLinkName); } final Message message = Proton.message(); message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(fullyQualifiedNamespace, entityPath); } }
Why duplicate logic, the logic in cancel and with iterable and single item are exactly the same.
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers, String associatedLinkName) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, longs))); return sendWithVerify(channel, requestMessage, null); })).then(); }
return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE)
public Mono<Void> cancelScheduledMessages(Iterable<Long> sequenceNumbers, String associatedLinkName) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); final Long[] longs = numbers.toArray(new Long[0]); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, longs))); return sendWithVerify(channel, requestMessage, null); })).then(); }
class ManagementChannel implements ServiceBusManagementNode { private final MessageSerializer messageSerializer; private final TokenManager tokenManager; private final Duration operationTimeout; private final Mono<RequestResponseChannel> createChannel; private final String fullyQualifiedNamespace; private final ClientLogger logger; private final String entityPath; private volatile boolean isDisposed; ManagementChannel(Mono<RequestResponseChannel> createChannel, String fullyQualifiedNamespace, String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) { this.createChannel = Objects.requireNonNull(createChannel, "'createChannel' cannot be null."); this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath)); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); } /** * {@inheritDoc} */ @Override public Mono<Void> cancelScheduledMessage(long sequenceNumber, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE) .then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage( ManagementConstants.OPERATION_CANCEL_SCHEDULED_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(Collections.singletonMap(ManagementConstants.SEQUENCE_NUMBERS, new Long[]{sequenceNumber}))); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { 
transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Mono<byte[]> getSessionState(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_GET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_GET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).flatMap(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { return monoError(logger, Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object sessionState = map.get(ManagementConstants.SESSION_STATE); if (sessionState == null) { logger.info("sessionId[{}]. 
Does not have a session state.", sessionId); return Mono.empty(); } final byte[] state = ((Binary) sessionState).getArray(); return Mono.just(state); }); } /** * {@inheritDoc} */ @Override public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName) { return peek(fromSequenceNumber, sessionId, associatedLinkName, 1) .last(); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName, int maxMessages) { return isAuthorized(OPERATION_PEEK).thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_PEEK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.FROM_SEQUENCE_NUMBER, fromSequenceNumber); requestBody.put(ManagementConstants.MESSAGE_COUNT_KEY, maxMessages); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, message, null); }).flatMapMany(response -> { final List<ServiceBusReceivedMessage> messages = messageSerializer.deserializeList(response, ServiceBusReceivedMessage.class); return Flux.fromIterable(messages); })); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(ReceiveMode receiveMode, String sessionId, String associatedLinkName, Iterable<Long> sequenceNumbers) { return isAuthorized(ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER) .thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage( ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER, associatedLinkName); final Map<String, Object> requestBodyMap = new HashMap<>(); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); 
requestBodyMap.put(ManagementConstants.SEQUENCE_NUMBERS, longs); requestBodyMap.put(ManagementConstants.RECEIVER_SETTLE_MODE, UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1)); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBodyMap.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBodyMap)); return sendWithVerify(channel, message, null); }).flatMapMany(amqpMessage -> { final List<ServiceBusReceivedMessage> messageList = messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class); return Flux.fromIterable(messageList); })); } /** * {@inheritDoc} */ @Override public Mono<Instant> renewMessageLock(String lockToken, String associatedLinkName) { return isAuthorized(OPERATION_PEEK).then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage(ManagementConstants.OPERATION_RENEW_LOCK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, new UUID[]{UUID.fromString(lockToken)}); requestMessage.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, requestMessage, null); }).map(responseMessage -> { final List<Instant> renewTimeList = messageSerializer.deserializeList(responseMessage, Instant.class); if (CoreUtils.isNullOrEmpty(renewTimeList)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Service bus response empty. 
Could not renew message with lock token: '%s'.", lockToken), getErrorContext()))); } return renewTimeList.get(0); })); } @Override public Mono<Instant> renewSessionLock(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_RENEW_SESSION_LOCK).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_RENEW_SESSION_LOCK, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).map(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object expirationValue = map.get(ManagementConstants.EXPIRATION); if (!(expirationValue instanceof Date)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Expiration is not of type Date when renewing session. Id: %s. 
Value: %s", sessionId, expirationValue), getErrorContext()))); } return ((Date) expirationValue).toInstant(); }); } /** * {@inheritDoc} */ @Override public Flux<Long> schedule(final List<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, int maxLinkSize, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(OPERATION_SCHEDULE_MESSAGE).thenMany(createChannel.flatMap(channel -> { final Collection<Map<String, Object>> messageList = new LinkedList<>(); for (ServiceBusMessage message : messages) { message.setScheduledEnqueueTime(scheduledEnqueueTime); final Message amqpMessage = messageSerializer.serialize(message); final int payloadSize = messageSerializer.getSize(amqpMessage); final int allocationSize = Math.min(payloadSize + ManagementConstants.MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxLinkSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = amqpMessage.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format( "Error sending. 
Size of the payload exceeded maximum message size: %s kb", maxLinkSize / 1024); final AmqpErrorContext errorContext = channel.getErrorContext(); return monoError(logger, Exceptions.propagate(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, errorContext))); } final Map<String, Object> messageEntry = new HashMap<>(); messageEntry.put(ManagementConstants.MESSAGE, new Binary(bytes, 0, encodedSize)); messageEntry.put(ManagementConstants.MESSAGE_ID, amqpMessage.getMessageId()); final String sessionId = amqpMessage.getGroupId(); if (!CoreUtils.isNullOrEmpty(sessionId)) { messageEntry.put(ManagementConstants.SESSION_ID, sessionId); } final String partitionKey = message.getPartitionKey(); if (!CoreUtils.isNullOrEmpty(partitionKey)) { messageEntry.put(ManagementConstants.PARTITION_KEY, partitionKey); } final String viaPartitionKey = message.getViaPartitionKey(); if (!CoreUtils.isNullOrEmpty(viaPartitionKey)) { messageEntry.put(ManagementConstants.VIA_PARTITION_KEY, viaPartitionKey); } messageList.add(messageEntry); } final Map<String, Object> requestBodyMap = new HashMap<>(); requestBodyMap.put(ManagementConstants.MESSAGES, messageList); final Message requestMessage = createManagementMessage(OPERATION_SCHEDULE_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(requestBodyMap)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); }) .flatMapMany(response -> { final List<Long> sequenceNumbers = messageSerializer.deserializeList(response, Long.class); if (CoreUtils.isNullOrEmpty(sequenceNumbers)) { fluxError(logger, new AmqpException(false, String.format( "Service Bus response was empty. 
Could not schedule message()s."), getErrorContext())); } return Flux.fromIterable(sequenceNumbers); })); } @Override public Mono<Void> setSessionState(String sessionId, byte[] state, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_SET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_SET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); body.put(ManagementConstants.SESSION_STATE, state == null ? null : new Binary(state)); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null).then(); })); } @Override public Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, String sessionId, String associatedLinkName, ServiceBusTransactionContext transactionContext) { final UUID[] lockTokens = new UUID[]{UUID.fromString(lockToken)}; return isAuthorized(OPERATION_UPDATE_DISPOSITION).then(createChannel.flatMap(channel -> { logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'", Arrays.toString(lockTokens), dispositionStatus, entityPath, sessionId); final Message message = createManagementMessage(OPERATION_UPDATE_DISPOSITION, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, lockTokens); requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue()); if (deadLetterReason != null) { requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason); } if (deadLetterErrorDescription != 
null) { requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription); } if (propertiesToModify != null && propertiesToModify.size() > 0) { requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify); } if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, message, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override public void close() { if (isDisposed) { return; } isDisposed = true; tokenManager.close(); } private Mono<Message> sendWithVerify(RequestResponseChannel channel, Message message, DeliveryState deliveryState) { return channel.sendWithAck(message, deliveryState) .handle((Message response, SynchronousSink<Message> sink) -> { if (RequestResponseUtils.isSuccessful(response)) { sink.next(response); return; } final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(response); final String statusDescription = RequestResponseUtils.getStatusDescription(response); final String errorCondition = RequestResponseUtils.getErrorCondition(response); final Throwable throwable = MessageUtils.toException( new ErrorCondition(Symbol.getSymbol(errorCondition), statusDescription), channel.getErrorContext()); logger.warning("status[{}] description[{}] condition[{}] Operation not successful.", statusCode, statusDescription, errorCondition); sink.error(throwable); }) .switchIfEmpty(Mono.error(new AmqpException(true, "No response received from management channel.", channel.getErrorContext()))); } private Mono<Void> isAuthorized(String operation) { return tokenManager.getAuthorizationResults() .next() 
.handle((response, sink) -> { if (response != AmqpResponseCode.ACCEPTED && response != AmqpResponseCode.OK) { sink.error(new AmqpException(false, String.format( "User does not have authorization to perform operation [%s] on entity [%s]. Response: [%s]", operation, entityPath, response), getErrorContext())); } else { sink.complete(); } }); } /** * Creates an AMQP message with the required application properties. * * @param operation Management operation to perform (ie. peek, update-disposition, etc.) * @param associatedLinkName Name of the open receive link that first received the message. * * @return An AMQP message with the required headers. */ private Message createManagementMessage(String operation, String associatedLinkName) { final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put(ManagementConstants.MANAGEMENT_OPERATION_KEY, operation); applicationProperties.put(ManagementConstants.SERVER_TIMEOUT, serverTimeout.toMillis()); if (!CoreUtils.isNullOrEmpty(associatedLinkName)) { applicationProperties.put(ManagementConstants.ASSOCIATED_LINK_NAME_KEY, associatedLinkName); } final Message message = Proton.message(); message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(fullyQualifiedNamespace, entityPath); } }
class ManagementChannel implements ServiceBusManagementNode { private final MessageSerializer messageSerializer; private final TokenManager tokenManager; private final Duration operationTimeout; private final Mono<RequestResponseChannel> createChannel; private final String fullyQualifiedNamespace; private final ClientLogger logger; private final String entityPath; private volatile boolean isDisposed; ManagementChannel(Mono<RequestResponseChannel> createChannel, String fullyQualifiedNamespace, String entityPath, TokenManager tokenManager, MessageSerializer messageSerializer, Duration operationTimeout) { this.createChannel = Objects.requireNonNull(createChannel, "'createChannel' cannot be null."); this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.logger = new ClientLogger(String.format("%s<%s>", ManagementChannel.class, entityPath)); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.tokenManager = Objects.requireNonNull(tokenManager, "'tokenManager' cannot be null."); this.operationTimeout = Objects.requireNonNull(operationTimeout, "'operationTimeout' cannot be null."); } /** * {@inheritDoc} */ @Override /** * {@inheritDoc} */ @Override public Mono<byte[]> getSessionState(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_GET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_GET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); 
message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).flatMap(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { return monoError(logger, Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object sessionState = map.get(ManagementConstants.SESSION_STATE); if (sessionState == null) { logger.info("sessionId[{}]. Does not have a session state.", sessionId); return Mono.empty(); } final byte[] state = ((Binary) sessionState).getArray(); return Mono.just(state); }); } /** * {@inheritDoc} */ @Override public Mono<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName) { return peek(fromSequenceNumber, sessionId, associatedLinkName, 1) .next(); } /** * {@inheritDoc} */ @Override public Flux<ServiceBusReceivedMessage> peek(long fromSequenceNumber, String sessionId, String associatedLinkName, int maxMessages) { return isAuthorized(OPERATION_PEEK).thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_PEEK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.FROM_SEQUENCE_NUMBER, fromSequenceNumber); requestBody.put(ManagementConstants.MESSAGE_COUNT_KEY, maxMessages); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, message, null); }).flatMapMany(response -> { final List<ServiceBusReceivedMessage> messages = messageSerializer.deserializeList(response, ServiceBusReceivedMessage.class); return Flux.fromIterable(messages); })); } /** * {@inheritDoc} */ @Override public 
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(ReceiveMode receiveMode, String sessionId, String associatedLinkName, Iterable<Long> sequenceNumbers) { return isAuthorized(ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER) .thenMany(createChannel.flatMap(channel -> { final Message message = createManagementMessage( ManagementConstants.OPERATION_RECEIVE_BY_SEQUENCE_NUMBER, associatedLinkName); final Map<String, Object> requestBodyMap = new HashMap<>(); final List<Long> numbers = new ArrayList<>(); sequenceNumbers.forEach(s -> numbers.add(s)); Long[] longs = numbers.toArray(new Long[0]); requestBodyMap.put(ManagementConstants.SEQUENCE_NUMBERS, longs); requestBodyMap.put(ManagementConstants.RECEIVER_SETTLE_MODE, UnsignedInteger.valueOf(receiveMode == ReceiveMode.RECEIVE_AND_DELETE ? 0 : 1)); if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBodyMap.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBodyMap)); return sendWithVerify(channel, message, null); }).flatMapMany(amqpMessage -> { final List<ServiceBusReceivedMessage> messageList = messageSerializer.deserializeList(amqpMessage, ServiceBusReceivedMessage.class); return Flux.fromIterable(messageList); })); } /** * {@inheritDoc} */ @Override public Mono<OffsetDateTime> renewMessageLock(String lockToken, String associatedLinkName) { return isAuthorized(OPERATION_PEEK).then(createChannel.flatMap(channel -> { final Message requestMessage = createManagementMessage(ManagementConstants.OPERATION_RENEW_LOCK, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, new UUID[]{UUID.fromString(lockToken)}); requestMessage.setBody(new AmqpValue(requestBody)); return sendWithVerify(channel, requestMessage, null); }).map(responseMessage -> { final List<OffsetDateTime> renewTimeList = messageSerializer.deserializeList(responseMessage, OffsetDateTime.class); if (CoreUtils.isNullOrEmpty(renewTimeList)) { 
throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Service bus response empty. Could not renew message with lock token: '%s'.", lockToken), getErrorContext()))); } return renewTimeList.get(0); })); } @Override public Mono<OffsetDateTime> renewSessionLock(String sessionId, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_RENEW_SESSION_LOCK).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_RENEW_SESSION_LOCK, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null); })).map(response -> { final Object value = ((AmqpValue) response.getBody()).getValue(); if (!(value instanceof Map)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Body not expected when renewing session. Id: %s. Value: %s", sessionId, value), getErrorContext()))); } @SuppressWarnings("unchecked") final Map<String, Object> map = (Map<String, Object>) value; final Object expirationValue = map.get(ManagementConstants.EXPIRATION); if (!(expirationValue instanceof Date)) { throw logger.logExceptionAsError(Exceptions.propagate(new AmqpException(false, String.format( "Expiration is not of type Date when renewing session. Id: %s. 
Value: %s", sessionId, expirationValue), getErrorContext()))); } return ((Date) expirationValue).toInstant().atOffset(ZoneOffset.UTC); }); } /** * {@inheritDoc} */ @Override public Flux<Long> schedule(List<ServiceBusMessage> messages, OffsetDateTime scheduledEnqueueTime, int maxLinkSize, String associatedLinkName, ServiceBusTransactionContext transactionContext) { return isAuthorized(OPERATION_SCHEDULE_MESSAGE).thenMany(createChannel.flatMap(channel -> { final Collection<Map<String, Object>> messageList = new LinkedList<>(); for (ServiceBusMessage message : messages) { message.setScheduledEnqueueTime(scheduledEnqueueTime); final Message amqpMessage = messageSerializer.serialize(message); final int payloadSize = messageSerializer.getSize(amqpMessage); final int allocationSize = Math.min(payloadSize + ManagementConstants.MAX_MESSAGING_AMQP_HEADER_SIZE_BYTES, maxLinkSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = amqpMessage.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format( "Error sending. 
Size of the payload exceeded maximum message size: %s kb", maxLinkSize / 1024); final AmqpErrorContext errorContext = channel.getErrorContext(); return monoError(logger, Exceptions.propagate(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, errorContext))); } final Map<String, Object> messageEntry = new HashMap<>(); messageEntry.put(ManagementConstants.MESSAGE, new Binary(bytes, 0, encodedSize)); messageEntry.put(ManagementConstants.MESSAGE_ID, amqpMessage.getMessageId()); final String sessionId = amqpMessage.getGroupId(); if (!CoreUtils.isNullOrEmpty(sessionId)) { messageEntry.put(ManagementConstants.SESSION_ID, sessionId); } final String partitionKey = message.getPartitionKey(); if (!CoreUtils.isNullOrEmpty(partitionKey)) { messageEntry.put(ManagementConstants.PARTITION_KEY, partitionKey); } final String viaPartitionKey = message.getViaPartitionKey(); if (!CoreUtils.isNullOrEmpty(viaPartitionKey)) { messageEntry.put(ManagementConstants.VIA_PARTITION_KEY, viaPartitionKey); } messageList.add(messageEntry); } final Map<String, Object> requestBodyMap = new HashMap<>(); requestBodyMap.put(ManagementConstants.MESSAGES, messageList); final Message requestMessage = createManagementMessage(OPERATION_SCHEDULE_MESSAGE, associatedLinkName); requestMessage.setBody(new AmqpValue(requestBodyMap)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, requestMessage, transactionalState); }) .flatMapMany(response -> { final List<Long> sequenceNumbers = messageSerializer.deserializeList(response, Long.class); if (CoreUtils.isNullOrEmpty(sequenceNumbers)) { fluxError(logger, new AmqpException(false, String.format( "Service Bus response was empty. 
Could not schedule message()s."), getErrorContext())); } return Flux.fromIterable(sequenceNumbers); })); } @Override public Mono<Void> setSessionState(String sessionId, byte[] state, String associatedLinkName) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be blank.")); } return isAuthorized(OPERATION_SET_SESSION_STATE).then(createChannel.flatMap(channel -> { final Message message = createManagementMessage(OPERATION_SET_SESSION_STATE, associatedLinkName); final Map<String, Object> body = new HashMap<>(); body.put(ManagementConstants.SESSION_ID, sessionId); body.put(ManagementConstants.SESSION_STATE, state == null ? null : new Binary(state)); message.setBody(new AmqpValue(body)); return sendWithVerify(channel, message, null).then(); })); } @Override public Mono<Void> updateDisposition(String lockToken, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, String sessionId, String associatedLinkName, ServiceBusTransactionContext transactionContext) { final UUID[] lockTokens = new UUID[]{UUID.fromString(lockToken)}; return isAuthorized(OPERATION_UPDATE_DISPOSITION).then(createChannel.flatMap(channel -> { logger.verbose("Update disposition of deliveries '{}' to '{}' on entity '{}', session '{}'", Arrays.toString(lockTokens), dispositionStatus, entityPath, sessionId); final Message message = createManagementMessage(OPERATION_UPDATE_DISPOSITION, associatedLinkName); final Map<String, Object> requestBody = new HashMap<>(); requestBody.put(ManagementConstants.LOCK_TOKENS_KEY, lockTokens); requestBody.put(ManagementConstants.DISPOSITION_STATUS_KEY, dispositionStatus.getValue()); if (deadLetterReason != null) { requestBody.put(ManagementConstants.DEADLETTER_REASON_KEY, deadLetterReason); } if (deadLetterErrorDescription != 
null) { requestBody.put(ManagementConstants.DEADLETTER_DESCRIPTION_KEY, deadLetterErrorDescription); } if (propertiesToModify != null && propertiesToModify.size() > 0) { requestBody.put(ManagementConstants.PROPERTIES_TO_MODIFY_KEY, propertiesToModify); } if (!CoreUtils.isNullOrEmpty(sessionId)) { requestBody.put(ManagementConstants.SESSION_ID, sessionId); } message.setBody(new AmqpValue(requestBody)); TransactionalState transactionalState = null; if (transactionContext != null && transactionContext.getTransactionId() != null) { transactionalState = new TransactionalState(); transactionalState.setTxnId(new Binary(transactionContext.getTransactionId().array())); } return sendWithVerify(channel, message, transactionalState); })).then(); } /** * {@inheritDoc} */ @Override public void close() { if (isDisposed) { return; } isDisposed = true; tokenManager.close(); } private Mono<Message> sendWithVerify(RequestResponseChannel channel, Message message, DeliveryState deliveryState) { return channel.sendWithAck(message, deliveryState) .handle((Message response, SynchronousSink<Message> sink) -> { if (RequestResponseUtils.isSuccessful(response)) { sink.next(response); return; } final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(response); if (statusCode == AmqpResponseCode.NO_CONTENT) { sink.next(response); return; } final String errorCondition = RequestResponseUtils.getErrorCondition(response); if (statusCode == AmqpResponseCode.NOT_FOUND) { final AmqpErrorCondition amqpErrorCondition = AmqpErrorCondition.fromString(errorCondition); if (amqpErrorCondition == AmqpErrorCondition.MESSAGE_NOT_FOUND) { logger.info("There was no matching message found."); sink.next(response); return; } else if (amqpErrorCondition == AmqpErrorCondition.SESSION_NOT_FOUND) { logger.info("There was no matching session found."); sink.next(response); return; } } final String statusDescription = RequestResponseUtils.getStatusDescription(response); final Throwable throwable = 
ExceptionUtil.toException(errorCondition, statusDescription, channel.getErrorContext()); logger.warning("status[{}] description[{}] condition[{}] Operation not successful.", statusCode, statusDescription, errorCondition); sink.error(throwable); }) .switchIfEmpty(Mono.error(new AmqpException(true, "No response received from management channel.", channel.getErrorContext()))); } private Mono<Void> isAuthorized(String operation) { return tokenManager.getAuthorizationResults() .next() .handle((response, sink) -> { if (response != AmqpResponseCode.ACCEPTED && response != AmqpResponseCode.OK) { sink.error(new AmqpException(false, String.format( "User does not have authorization to perform operation [%s] on entity [%s]. Response: [%s]", operation, entityPath, response), getErrorContext())); } else { sink.complete(); } }); } /** * Creates an AMQP message with the required application properties. * * @param operation Management operation to perform (ie. peek, update-disposition, etc.) * @param associatedLinkName Name of the open receive link that first received the message. * * @return An AMQP message with the required headers. */ private Message createManagementMessage(String operation, String associatedLinkName) { final Duration serverTimeout = MessageUtils.adjustServerTimeout(operationTimeout); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put(ManagementConstants.MANAGEMENT_OPERATION_KEY, operation); applicationProperties.put(ManagementConstants.SERVER_TIMEOUT, serverTimeout.toMillis()); if (!CoreUtils.isNullOrEmpty(associatedLinkName)) { applicationProperties.put(ManagementConstants.ASSOCIATED_LINK_NAME_KEY, associatedLinkName); } final Message message = Proton.message(); message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(fullyQualifiedNamespace, entityPath); } }
Any sub queue are accessed via non-session receiver, confirmed with dotnet also. Thus we are not exposing `subQueue()` in session receiver builder. That is why I am passing NONE.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount, maxAutoLockRenewalDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration, sessionId, isRollingSessionReceiver(), maxConcurrentSessions); if (CoreUtils.isNullOrEmpty(sessionId)) { final UnnamedSessionManager sessionManager = new UnnamedSessionManager(entityPath, entityType, connectionProcessor, connectionProcessor.getRetryOptions().getTryTimeout(), tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } else { return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
SubQueue.NONE);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount, maxAutoLockRenewalDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewalDuration, sessionId, isRollingSessionReceiver(), maxConcurrentSessions); if (CoreUtils.isNullOrEmpty(sessionId)) { final UnnamedSessionManager sessionManager = new UnnamedSessionManager(entityPath, entityType, connectionProcessor, connectionProcessor.getRetryOptions().getTryTimeout(), tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } else { return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. 
* * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewalDuration; private ServiceBusSessionReceiverClientBuilder() { } /** * Enables auto-lock renewal by renewing each session lock until the {@code maxAutoLockRenewalDuration} has * elapsed. * * @param maxAutoLockRenewalDuration Maximum amount of time to renew the session lock. * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewalDuration(Duration maxAutoLockRenewalDuration) { this.maxAutoLockRenewalDuration = maxAutoLockRenewalDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. 
* * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
use more descriptive variable names instead of `e` and `p`.
public static void main(final String[] args) throws IOException { String endpoint = "<anomaly-detector-resource-endpoint>"; String key = "<anomaly-detector-resource-key>"; HttpHeaders headers = new HttpHeaders() .put("Accept", ContentType.APPLICATION_JSON); HttpPipelinePolicy authPolicy = new AzureKeyCredentialPolicy("Ocp-Apim-Subscription-Key", new AzureKeyCredential(key)); AddHeadersPolicy addHeadersPolicy = new AddHeadersPolicy(headers); HttpPipeline httpPipeline = new HttpPipelineBuilder().httpClient(HttpClient.createDefault()) .policies(authPolicy, addHeadersPolicy).build(); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .pipeline(httpPipeline) .endpoint(endpoint) .buildClient(); Path path = Paths.get("./src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(e -> e.trim()) .filter(e -> e.length() > 0) .map(e -> e.split(",", 2)) .filter(e -> e.length == 2) .map(e -> { TimeSeriesPoint p = new TimeSeriesPoint(); p.setTimestamp(OffsetDateTime.parse(e[0])); p.setValue(Float.parseFloat(e[1])); return p; }) .collect(Collectors.toList()); System.out.println("Detecting anomalies as a batch..."); DetectRequest request = new DetectRequest(); request.setSeries(series); request.setGranularity(TimeGranularity.DAILY); EntireDetectResponse response = anomalyDetectorClient.detectEntireSeries(request); if (response.getIsAnomaly().contains(true)) { System.out.println("Anomalies found in the following data positions:"); for (int i = 0; i < request.getSeries().size(); ++i) { if (response.getIsAnomaly().get(i)) { System.out.print(i + " "); } } System.out.println(); } else { System.out.println("No anomalies were found in the series."); } }
})
public static void main(final String[] args) throws IOException { String endpoint = "<anomaly-detector-resource-endpoint>"; String key = "<anomaly-detector-resource-key>"; HttpHeaders headers = new HttpHeaders() .put("Accept", ContentType.APPLICATION_JSON); HttpPipelinePolicy authPolicy = new AzureKeyCredentialPolicy("Ocp-Apim-Subscription-Key", new AzureKeyCredential(key)); AddHeadersPolicy addHeadersPolicy = new AddHeadersPolicy(headers); HttpPipeline httpPipeline = new HttpPipelineBuilder().httpClient(HttpClient.createDefault()) .policies(authPolicy, addHeadersPolicy).build(); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .pipeline(httpPipeline) .endpoint(endpoint) .buildClient(); Path path = Paths.get("./src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(line -> line.trim()) .filter(line -> line.length() > 0) .map(line -> line.split(",", 2)) .filter(splits -> splits.length == 2) .map(splits -> { TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(); timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0])); timeSeriesPoint.setValue(Float.parseFloat(splits[1])); return timeSeriesPoint; }) .collect(Collectors.toList()); System.out.println("Detecting anomalies as a batch..."); DetectRequest request = new DetectRequest(); request.setSeries(series); request.setGranularity(TimeGranularity.DAILY); EntireDetectResponse response = anomalyDetectorClient.detectEntireSeries(request); if (response.getIsAnomaly().contains(true)) { System.out.println("Anomalies found in the following data positions:"); for (int i = 0; i < request.getSeries().size(); ++i) { if (response.getIsAnomaly().get(i)) { System.out.print(i + " "); } } System.out.println(); } else { System.out.println("No anomalies were found in the series."); } }
class DetectAnomaliesEntireSeries { }
class DetectAnomaliesEntireSeries { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
It would be good to add comments to describe what is happening in the samples to help the users follow the sample better.
public static void main(final String[] args) throws IOException { String endpoint = "<anomaly-detector-resource-endpoint>"; String key = "<anomaly-detector-resource-key>"; HttpHeaders headers = new HttpHeaders() .put("Accept", ContentType.APPLICATION_JSON); HttpPipelinePolicy authPolicy = new AzureKeyCredentialPolicy("Ocp-Apim-Subscription-Key", new AzureKeyCredential(key)); AddHeadersPolicy addHeadersPolicy = new AddHeadersPolicy(headers); HttpPipeline httpPipeline = new HttpPipelineBuilder().httpClient(HttpClient.createDefault()) .policies(authPolicy, addHeadersPolicy).build(); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .pipeline(httpPipeline) .endpoint(endpoint) .buildClient(); Path path = Paths.get("./src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(e -> e.trim()) .filter(e -> e.length() > 0) .map(e -> e.split(",", 2)) .filter(e -> e.length == 2) .map(e -> { TimeSeriesPoint p = new TimeSeriesPoint(); p.setTimestamp(OffsetDateTime.parse(e[0])); p.setValue(Float.parseFloat(e[1])); return p; }) .collect(Collectors.toList()); System.out.println("Detecting change points..."); ChangePointDetectRequest request = new ChangePointDetectRequest(); request.setSeries(series); request.setGranularity(TimeGranularity.DAILY); ChangePointDetectResponse response = anomalyDetectorClient.detectChangePoint(request); if (response.getIsChangePoint().contains(true)) { System.out.println("Change points found in the following data positions:"); for (int i = 0; i < request.getSeries().size(); ++i) { if (response.getIsChangePoint().get(i)) { System.out.print(i + " "); } } System.out.println(); } else { System.out.println("No change points were found in the series."); } }
List<TimeSeriesPoint> series = requestData.stream()
public static void main(final String[] args) throws IOException { String endpoint = "<anomaly-detector-resource-endpoint>"; String key = "<anomaly-detector-resource-key>"; HttpHeaders headers = new HttpHeaders() .put("Accept", ContentType.APPLICATION_JSON); HttpPipelinePolicy authPolicy = new AzureKeyCredentialPolicy("Ocp-Apim-Subscription-Key", new AzureKeyCredential(key)); AddHeadersPolicy addHeadersPolicy = new AddHeadersPolicy(headers); HttpPipeline httpPipeline = new HttpPipelineBuilder().httpClient(HttpClient.createDefault()) .policies(authPolicy, addHeadersPolicy).build(); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .pipeline(httpPipeline) .endpoint(endpoint) .buildClient(); Path path = Paths.get("./src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(line -> line.trim()) .filter(line -> line.length() > 0) .map(line -> line.split(",", 2)) .filter(splits -> splits.length == 2) .map(splits -> { TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(); timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0])); timeSeriesPoint.setValue(Float.parseFloat(splits[1])); return timeSeriesPoint; }) .collect(Collectors.toList()); System.out.println("Detecting change points..."); ChangePointDetectRequest request = new ChangePointDetectRequest(); request.setSeries(series); request.setGranularity(TimeGranularity.DAILY); ChangePointDetectResponse response = anomalyDetectorClient.detectChangePoint(request); if (response.getIsChangePoint().contains(true)) { System.out.println("Change points found in the following data positions:"); for (int i = 0; i < request.getSeries().size(); ++i) { if (response.getIsChangePoint().get(i)) { System.out.print(i + " "); } } System.out.println(); } else { System.out.println("No change points were found in the series."); } }
class DetectChangePoints { }
class DetectChangePoints { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
The multi nested if loops is hard to reason about.. is it possible to flatten it and return early? ```java if (TablesConstants.METADATA_KEYS.contains(fieldName) .... ) { return valueNode.asText(); } ```
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) { JsonNode valueNode = parentNode.get(fieldName); if (!TablesConstants.METADATA_KEYS.contains(fieldName) && !fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { JsonNode typeNode = parentNode.get(fieldName + TablesConstants.ODATA_TYPE_KEY_SUFFIX); if (typeNode != null) { String typeString = typeNode.asText(); EntityDataModelType type = EntityDataModelType.parse(typeString); if (type != null) { try { return type.deserialize(valueNode.asText()); } catch (Exception e) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'%s' value is not a valid %s.", fieldName, type.getEdmType()), e)); } } else { logger.warning(String.format("'%s' value has unknown OData type %s", fieldName, typeString)); } } } return valueNode.asText(); }
if (!TablesConstants.METADATA_KEYS.contains(fieldName)
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) throws IOException { JsonNode valueNode = parentNode.get(fieldName); if (TablesConstants.METADATA_KEYS.contains(fieldName) || fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { return serializer().treeToValue(valueNode, Object.class); } JsonNode typeNode = parentNode.get(fieldName + TablesConstants.ODATA_TYPE_KEY_SUFFIX); if (typeNode == null) { return serializer().treeToValue(valueNode, Object.class); } String typeString = typeNode.asText(); EntityDataModelType type = EntityDataModelType.fromString(typeString); if (type == null) { logger.warning(String.format("'%s' value has unknown OData type %s", fieldName, typeString)); return serializer().treeToValue(valueNode, Object.class); } try { return type.deserialize(valueNode.asText()); } catch (Exception e) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'%s' value is not a valid %s.", fieldName, type.getEdmType()), e)); } }
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } @SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected 
response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext();) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); if (fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX) && TablesConstants.ODATA_TYPE_NULL.equals(node.get(fieldName).asText())) { String nullFieldName = fieldName.substring(0, fieldName.length() - TablesConstants.ODATA_TYPE_KEY_SUFFIX.length()); result.put(nullFieldName, null); } } return result; } }
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object instanceof Map) { super.serialize(insertTypeProperties(object), encoding, outputStream); } else { super.serialize(object, encoding, outputStream); } } @SuppressWarnings("unchecked") private Map<String, Object> insertTypeProperties(Object o) { Map<String, Object> map = (Map<String, Object>) o; Map<String, Object> result = new HashMap<>(); for (Map.Entry<String, Object> entry : map.entrySet()) { String propertyName = entry.getKey(); Object propertyValue = entry.getValue(); if (propertyValue == null) { continue; } if (propertyValue instanceof Long) { result.put(propertyName, String.valueOf(propertyValue)); } else { result.put(propertyName, propertyValue); } if (TablesConstants.METADATA_KEYS.contains(propertyName) || propertyName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { continue; } EntityDataModelType typeToTag = EntityDataModelType.forClass(propertyValue.getClass()); if (typeToTag == null) { continue; } result.putIfAbsent(propertyName + TablesConstants.ODATA_TYPE_KEY_SUFFIX, typeToTag.getEdmType()); } return result; } @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } 
@SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) throws IOException { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext();) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); } return result; } }
Fixed
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) { JsonNode valueNode = parentNode.get(fieldName); if (!TablesConstants.METADATA_KEYS.contains(fieldName) && !fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { JsonNode typeNode = parentNode.get(fieldName + TablesConstants.ODATA_TYPE_KEY_SUFFIX); if (typeNode != null) { String typeString = typeNode.asText(); EntityDataModelType type = EntityDataModelType.parse(typeString); if (type != null) { try { return type.deserialize(valueNode.asText()); } catch (Exception e) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'%s' value is not a valid %s.", fieldName, type.getEdmType()), e)); } } else { logger.warning(String.format("'%s' value has unknown OData type %s", fieldName, typeString)); } } } return valueNode.asText(); }
if (!TablesConstants.METADATA_KEYS.contains(fieldName)
private Object getEntityFieldAsObject(JsonNode parentNode, String fieldName) throws IOException { JsonNode valueNode = parentNode.get(fieldName); if (TablesConstants.METADATA_KEYS.contains(fieldName) || fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { return serializer().treeToValue(valueNode, Object.class); } JsonNode typeNode = parentNode.get(fieldName + TablesConstants.ODATA_TYPE_KEY_SUFFIX); if (typeNode == null) { return serializer().treeToValue(valueNode, Object.class); } String typeString = typeNode.asText(); EntityDataModelType type = EntityDataModelType.fromString(typeString); if (type == null) { logger.warning(String.format("'%s' value has unknown OData type %s", fieldName, typeString)); return serializer().treeToValue(valueNode, Object.class); } try { return type.deserialize(valueNode.asText()); } catch (Exception e) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format( "'%s' value is not a valid %s.", fieldName, type.getEdmType()), e)); } }
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } @SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected 
response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext();) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); if (fieldName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX) && TablesConstants.ODATA_TYPE_NULL.equals(node.get(fieldName).asText())) { String nullFieldName = fieldName.substring(0, fieldName.length() - TablesConstants.ODATA_TYPE_KEY_SUFFIX.length()); result.put(nullFieldName, null); } } return result; } }
class TablesJacksonSerializer extends JacksonAdapter { private final ClientLogger logger = new ClientLogger(TablesJacksonSerializer.class); @Override public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException { if (object instanceof Map) { super.serialize(insertTypeProperties(object), encoding, outputStream); } else { super.serialize(object, encoding, outputStream); } } @SuppressWarnings("unchecked") private Map<String, Object> insertTypeProperties(Object o) { Map<String, Object> map = (Map<String, Object>) o; Map<String, Object> result = new HashMap<>(); for (Map.Entry<String, Object> entry : map.entrySet()) { String propertyName = entry.getKey(); Object propertyValue = entry.getValue(); if (propertyValue == null) { continue; } if (propertyValue instanceof Long) { result.put(propertyName, String.valueOf(propertyValue)); } else { result.put(propertyName, propertyValue); } if (TablesConstants.METADATA_KEYS.contains(propertyName) || propertyName.endsWith(TablesConstants.ODATA_TYPE_KEY_SUFFIX)) { continue; } EntityDataModelType typeToTag = EntityDataModelType.forClass(propertyValue.getClass()); if (typeToTag == null) { continue; } result.putIfAbsent(propertyName + TablesConstants.ODATA_TYPE_KEY_SUFFIX, typeToTag.getEdmType()); } return result; } @Override public <U> U deserialize(String value, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserialize(new ByteArrayInputStream(value.getBytes(StandardCharsets.UTF_8)), type, serializerEncoding); } else { return super.deserialize(value, type, serializerEncoding); } } @Override public <U> U deserialize(InputStream inputStream, Type type, SerializerEncoding serializerEncoding) throws IOException { if (type == TableEntityQueryResponse.class) { return deserializeTableEntityQueryResponse(inputStream); } else { return super.deserialize(inputStream, type, serializerEncoding); } } 
@SuppressWarnings("unchecked") private <U> U deserializeTableEntityQueryResponse(InputStream inputStream) throws IOException { String odataMetadata = null; List<Map<String, Object>> values = new ArrayList<>(); final JsonNode node = super.serializer().readTree(inputStream); Map<String, Object> singleValue = null; for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext();) { final Map.Entry<String, JsonNode> entry = it.next(); final String fieldName = entry.getKey(); final JsonNode childNode = entry.getValue(); if (fieldName.equals(TablesConstants.ODATA_METADATA_KEY)) { odataMetadata = childNode.asText(); } else if (fieldName.equals("value")) { if (childNode.isArray()) { for (JsonNode childEntry : childNode) { values.add(getEntityFieldsAsMap(childEntry)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } else { if (singleValue == null) { singleValue = new HashMap<>(); } singleValue.put(fieldName, getEntityFieldAsObject(node, fieldName)); } } if (singleValue != null) { if (values.size() > 0) { throw logger.logExceptionAsError(new IllegalStateException( "Unexpected response format. Response containing a 'value' array must not contain other properties." )); } values.add(singleValue); } return (U) new TableEntityQueryResponse() .setOdataMetadata(odataMetadata) .setValue(values); } private Map<String, Object> getEntityFieldsAsMap(JsonNode node) throws IOException { Map<String, Object> result = new HashMap<>(); for (Iterator<String> it = node.fieldNames(); it.hasNext();) { String fieldName = it.next(); result.put(fieldName, getEntityFieldAsObject(node, fieldName)); } return result; } }
Can you add tests for the new `offset` and `length` properties?
public int getLength() { return length; }
return length;
public int getLength() { return length; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
All tests that verifying the `SentenceSentiment` has verifying the offset and length. See https://github.com/Azure/azure-sdk-for-java/pull/14599/files/8f914029e29ac9d387803cc94f8db120ed7fcaba#diff-6152e7741d610810c960c85cfc56fbc2R361
public int getLength() { return length; }
return length;
public int getLength() { return length; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
Oh I'm so sorry I don't know how I totally missed those tests
public int getLength() { return length; }
return length;
public int getLength() { return length; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
As discussed, to fail early with a better error message, added the check here to throw if both port and redirectUrl are specified.
public InteractiveBrowserCredential build() { ValidationUtil.validate(getClass().getSimpleName(), new HashMap<String, Object>() {{ put("clientId", clientId); }}); return new InteractiveBrowserCredential(clientId, tenantId, port, redirectURL, automaticAuthentication, identityClientOptions); }
return new InteractiveBrowserCredential(clientId, tenantId, port, redirectURL, automaticAuthentication,
public InteractiveBrowserCredential build() { ValidationUtil.validateInteractiveBrowserRedirectUrlSetup(getClass().getSimpleName(), port, redirectUrl); ValidationUtil.validate(getClass().getSimpleName(), new HashMap<String, Object>() {{ put("clientId", clientId); }}); return new InteractiveBrowserCredential(clientId, tenantId, port, redirectUrl, automaticAuthentication, identityClientOptions); }
class InteractiveBrowserCredentialBuilder extends AadCredentialBuilderBase<InteractiveBrowserCredentialBuilder> { private Integer port; private boolean automaticAuthentication = true; private String redirectURL; /** * Sets the port for the local HTTP server, for which {@code http: * registered as a valid reply URL on the application. * * @deprecated Configure the redirectURL as {@code http: * {@link InteractiveBrowserCredentialBuilder * * @param port the port on which the credential will listen for the browser authentication result * @return the InteractiveBrowserCredentialBuilder itself */ @Deprecated public InteractiveBrowserCredentialBuilder port(int port) { this.port = port; return this; } /** * Allows to use an unprotected file specified by <code>cacheFileLocation()</code> instead of * Gnome keyring on Linux. This is restricted by default. * * @return An updated instance of this builder. */ InteractiveBrowserCredentialBuilder allowUnencryptedCache() { this.identityClientOptions.allowUnencryptedCache(); return this; } /** * Enables the shared token cache which is disabled by default. If enabled, the credential will store tokens * in a cache persisted to the machine, protected to the current user, which can be shared by other credentials * and processes. * * @return An updated instance of this builder with if the shared token cache enabled specified. */ InteractiveBrowserCredentialBuilder enablePersistentCache() { this.identityClientOptions.enablePersistentCache(); return this; } /** * Sets the {@link AuthenticationRecord} captured from a previous authentication. * * @param authenticationRecord The Authentication record to be configured. * * @return An updated instance of this builder with the configured authentication record. 
*/ InteractiveBrowserCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) { this.identityClientOptions.setAuthenticationRecord(authenticationRecord); return this; } /** * Sets the Redirect URL where STS will callback the application with the security code. It is required if a custom * client id is specified via {@link InteractiveBrowserCredentialBuilder * redirect URL specified during the application registration. * * @param redirectURL the redirect URL to listen on and receive security code. * * @return An updated instance of this builder with the configured redirect URL. */ public InteractiveBrowserCredentialBuilder redirectUrl(String redirectURL) { this.redirectURL = redirectURL; return this; } /** * Disables the automatic authentication and prevents the {@link InteractiveBrowserCredential} from automatically * prompting the user. If automatic authentication is disabled a {@link AuthenticationRequiredException} * will be thrown from {@link InteractiveBrowserCredential * user interaction is necessary. The application is responsible for handling this exception, and * calling {@link InteractiveBrowserCredential * {@link InteractiveBrowserCredential * * @return An updated instance of this builder with automatic authentication disabled. */ InteractiveBrowserCredentialBuilder disableAutomaticAuthentication() { this.automaticAuthentication = false; return this; } /** * Creates a new {@link InteractiveBrowserCredential} with the current configurations. * * @return a {@link InteractiveBrowserCredential} with the current configurations. */ }
class InteractiveBrowserCredentialBuilder extends AadCredentialBuilderBase<InteractiveBrowserCredentialBuilder> { private Integer port; private boolean automaticAuthentication = true; private String redirectUrl; /** * Sets the port for the local HTTP server, for which {@code http: * registered as a valid reply URL on the application. * * @deprecated Configure the redirect URL as {@code http: * {@link InteractiveBrowserCredentialBuilder * * @param port the port on which the credential will listen for the browser authentication result * @return the InteractiveBrowserCredentialBuilder itself */ @Deprecated public InteractiveBrowserCredentialBuilder port(int port) { this.port = port; return this; } /** * Allows to use an unprotected file specified by <code>cacheFileLocation()</code> instead of * Gnome keyring on Linux. This is restricted by default. * * @return An updated instance of this builder. */ public InteractiveBrowserCredentialBuilder allowUnencryptedCache() { this.identityClientOptions.setAllowUnencryptedCache(true); return this; } /** * Enables the shared token cache which is disabled by default. If enabled, the credential will store tokens * in a cache persisted to the machine, protected to the current user, which can be shared by other credentials * and processes. * * @return An updated instance of this builder with if the shared token cache enabled specified. */ public InteractiveBrowserCredentialBuilder enablePersistentCache() { this.identityClientOptions.enablePersistentCache(); return this; } /** * Sets the {@link AuthenticationRecord} captured from a previous authentication. * * @param authenticationRecord The Authentication record to be configured. * * @return An updated instance of this builder with the configured authentication record. 
*/ public InteractiveBrowserCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) { this.identityClientOptions.setAuthenticationRecord(authenticationRecord); return this; } /** * Sets the Redirect URL where STS will callback the application with the security code. It is required if a custom * client id is specified via {@link InteractiveBrowserCredentialBuilder * redirect URL specified during the application registration. * * @param redirectUrl the redirect URL to listen on and receive security code. * * @return An updated instance of this builder with the configured redirect URL. */ public InteractiveBrowserCredentialBuilder redirectUrl(String redirectUrl) { this.redirectUrl = redirectUrl; return this; } /** * Disables the automatic authentication and prevents the {@link InteractiveBrowserCredential} from automatically * prompting the user. If automatic authentication is disabled a {@link AuthenticationRequiredException} * will be thrown from {@link InteractiveBrowserCredential * user interaction is necessary. The application is responsible for handling this exception, and * calling {@link InteractiveBrowserCredential * {@link InteractiveBrowserCredential * * @return An updated instance of this builder with automatic authentication disabled. */ public InteractiveBrowserCredentialBuilder disableAutomaticAuthentication() { this.automaticAuthentication = false; return this; } /** * Creates a new {@link InteractiveBrowserCredential} with the current configurations. * * @return a {@link InteractiveBrowserCredential} with the current configurations. */ }
No worries. Thank you for reviewing the PR.
public int getLength() { return length; }
return length;
public int getLength() { return length; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; } /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. 
*/ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ }
Are these initializations only on `SentenceSentiment`, and not on `LinkedEntityMatch` or `CategorizedEntity`?
public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; }
this.length = 0;
public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. */ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. 
All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ public int getLength() { return length; } }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. */ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. 
All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ public int getLength() { return length; } }
We do have them in `LinkedEntityMatch`: https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/LinkedEntityMatch.java#L44 and in `CategorizedEntity`: https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/textanalytics/azure-ai-textanalytics/src/main/java/com/azure/ai/textanalytics/models/CategorizedEntity.java#L56
public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; }
this.length = 0;
public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores) { this.text = text; this.sentiment = sentiment; this.confidenceScores = confidenceScores; this.minedOpinions = null; this.offset = 0; this.length = 0; }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. */ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. 
All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ public int getLength() { return length; } }
class SentenceSentiment { private final String text; private final TextSentiment sentiment; private final SentimentConfidenceScores confidenceScores; private final IterableStream<MinedOpinion> minedOpinions; private final int offset; private final int length; /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. */ /** * Creates a {@link SentenceSentiment} model that describes the sentiment analysis of sentence. * * @param text The sentence text. * @param sentiment The sentiment label of the sentence. * @param confidenceScores The sentiment confidence score (Softmax score) between 0 and 1, for each sentiment label. * Higher values signify higher confidence. * @param minedOpinions The mined opinions of the sentence sentiment. This is only returned if you pass the * opinion mining parameter to the analyze sentiment APIs. * @param offset The start position for the sentence in a document. * @param length The length of sentence. */ public SentenceSentiment(String text, TextSentiment sentiment, SentimentConfidenceScores confidenceScores, IterableStream<MinedOpinion> minedOpinions, int offset, int length) { this.text = text; this.sentiment = sentiment; this.minedOpinions = minedOpinions; this.confidenceScores = confidenceScores; this.offset = offset; this.length = length; } /** * Get the sentence text property. * * @return The text property value. */ public String getText() { return this.text; } /** * Get the text sentiment label: POSITIVE, NEGATIVE, or NEUTRAL. * * @return The {@link TextSentiment}. */ public TextSentiment getSentiment() { return sentiment; } /** * Get the confidence score of the sentiment label. 
All score values sum up to 1, the higher the score, the * higher the confidence in the sentiment. * * @return The {@link SentimentConfidenceScores}. */ public SentimentConfidenceScores getConfidenceScores() { return confidenceScores; } /** * Get the mined opinions of sentence sentiment. * This is only returned if you pass the opinion mining parameter to the analyze sentiment APIs. * * @return The mined opinions of sentence sentiment. */ public IterableStream<MinedOpinion> getMinedOpinions() { return minedOpinions; } /** * Get the offset of sentence. The start position for the sentence in a document. * * @return The offset of sentence. */ public int getOffset() { return offset; } /** * Get the length of sentence. * * @return The length of sentence. */ public int getLength() { return length; } }
Should this error also apply to service principal authentication? I'm not familiar enough with the IntelliJ plug-in, but can you authenticate a service principal against an ADFS instance? Or does it not actually try to authenticate the SP, but rather just store the credentials?
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
+ "authentication unavailable. ADFS tenant/authorities are not supported."));
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build()))
        .map(MsalToken::new));
}

/**
 * Builds a minimal azure-core HTTP pipeline (retry + HTTP logging) around the given client.
 * Used so MSAL's HTTP traffic is routed through azure-core when only an HttpClient was configured.
 */
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    HttpLogOptions httpLogOptions = new HttpLogOptions();
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new HttpPipelineBuilder().httpClient(httpClient)
        .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}

/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                        String username, String password) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder(
            new HashSet<>(request.getScopes()), username, password.toCharArray()).build()))
        .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
            + "password", null, t))
        .map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from the currently logged in client.
 * @param request the details of the token request
 * @param account the account used to login to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            // First attempt: silent acquisition from the cache (no forced refresh).
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()));
            if (account != null) {
                parametersBuilder = parametersBuilder.account(account);
            }
            try {
                return pc.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            // Keep the cached token only while it is outside the refresh window (REFRESH_OFFSET
            // before expiry); otherwise fall through to a forced refresh below.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
            .switchIfEmpty(Mono.fromFuture(() -> {
                // Second attempt: same silent call but with forceRefresh(true) to bypass the cache.
                SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes())).forceRefresh(true);
                if (account != null) {
                    forceParametersBuilder = forceParametersBuilder.account(account);
                }
                try {
                    return pc.acquireTokenSilently(forceParametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)));
}

/**
 * Asynchronously acquire a token from the currently logged in client.
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()));
            try {
                return confidentialClient.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(ar -> (AccessToken) new MsalToken(ar))
            // Emit empty (instead of a near-expiry token) when the cached token is within
            // REFRESH_OFFSET of expiring, so the caller falls back to a fresh acquisition.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
}

/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                  Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> {
        DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(
            new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(
                new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(),
                    OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
        return pc.acquireToken(parameters);
    }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t))
        .map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from Active Directory with a Visual Studio Code cached refresh token.
 *
 * @param request the details of the token request
 * @param cloud the cloud name under which the VS Code credential was cached
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
    // ADFS authorities cannot be used with this flow; fail fast with an "unavailable" signal.
    if (isADFSTenant()) {
        return Mono.error(new CredentialUnavailableException("VsCodeCredential "
            + "authentication unavailable. ADFS tenant/authorities are not supported."));
    }
    VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
    // Refresh token looked up from the local VS Code credential cache
    // (presumably written by the VS Code Azure Account extension — TODO confirm).
    String credential = accessor.getCredentials("VS Code Azure", cloud);
    RefreshTokenParameters parameters = RefreshTokenParameters
        .builder(new HashSet<>(request.getScopes()), credential)
        .build();
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                         URI redirectUrl) {
    AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
        .scopes(new HashSet<>(request.getScopes()))
        .build();
    Mono<IAuthenticationResult> acquireToken;
    // A configured client secret implies a confidential-client redemption of the code;
    // otherwise the code is redeemed as a public client.
    if (clientSecret != null) {
        acquireToken = confidentialClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters)));
    } else {
        acquireToken = publicClientApplicationAccessor.getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters)));
    }
    return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
        "Failed to acquire token with authorization code", null, t)).map(MsalToken::new);
}

/**
 * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(HTTP_LOCALHOST + ":" + port); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. 
No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), 
StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch 
(Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline 
!= null) {
    httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
} else {
    HttpClient httpClient = options.getHttpClient();
    if (httpClient != null) {
        httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
    } else if (options.getProxyOptions() == null) {
        // No pipeline, client, or proxy configured: fall back to a default azure-core client.
        // When a proxy IS configured, no adapter is created and the callers pass the proxy
        // to MSAL directly instead.
        httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
    }
}
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}

// True when the configured tenant is the special "adfs" tenant (AD FS authority).
private boolean isADFSTenant() {
    return this.tenantId.equals(ADFS_TENANT);
}
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. 
No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
The IDE Azure plugin's service-principal (SP) authentication will not succeed against Azure Stack, because the plugin passes invalid resources in its token request. However, the service-principal details are still stored on the filesystem, so if they are used via IntelliJCredential with the correct scopes specified, authentication against Azure Stack works fine.
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
+ "authentication unavailable. ADFS tenant/authorities are not supported."));
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(HTTP_LOCALHOST + ":" + port); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. 
No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), 
StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch 
(Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline 
!= null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. 
No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
It's an interesting definition of "Works Fine", but I suppose we don't need to artificially guard against it.
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
+ "authentication unavailable. ADFS tenant/authorities are not supported."));
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) { try { IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath()); IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable(); String authType = authDetails.getAuthMethod(); if (authType.equalsIgnoreCase("SP")) { Map<String, String> spDetails = cacheAccessor .getIntellijServicePrincipalDetails(authDetails.getCredFilePath()); String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant"); try { ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(spDetails.get("client"), ClientCredentialFactory.createFromSecret(spDetails.get("key"))) .authority(authorityUrl); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else if (options.getProxyOptions() != null) { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } ConfidentialClientApplication application = applicationBuilder.build(); return Mono.fromFuture(application.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())) .build())).map(MsalToken::new); } catch (MalformedURLException e) { return Mono.error(e); } } else if (authType.equalsIgnoreCase("DC")) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("IntelliJCredential " + "authentication unavailable. 
ADFS tenant/authorities are not supported.")); } JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials(); String refreshToken = intelliJCredentials.get("refreshToken").textValue(); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), refreshToken) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } else { throw logger.logExceptionAsError(new CredentialUnavailableException( "IntelliJ Authentication not available." + " Please login with Azure Tools for IntelliJ plugin in the IDE.")); } } catch (IOException e) { return Mono.error(e); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(HTTP_LOCALHOST + ":" + port); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. 
No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), 
StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch 
(Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline 
!= null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port, String redirectUrl) { URI redirectUri; String redirect; if (port != null) { redirect = HTTP_LOCALHOST + ":" + port; } else if (redirectUrl != null) { redirect = redirectUrl; } else { redirect = HTTP_LOCALHOST; } try { redirectUri = new URI(redirect); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. 
No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
What if the `redirectUrl` already contains a port? Should we throw here if both `port` and `redirectUrl` are specified, since the two configurations can conflict?
/**
 * Creates a new {@link InteractiveBrowserCredential} with the current configurations.
 *
 * @return a {@link InteractiveBrowserCredential} with the current configurations.
 * @throws IllegalArgumentException if {@code clientId} is not set, or if the port / redirect URL
 *     configuration is ambiguous.
 */
public InteractiveBrowserCredential build() {
    // Reject ambiguous setups up front: a redirect URL may already embed its own port, so
    // configuring both port(...) and redirectUrl(...) is contradictory. Fail fast here rather
    // than letting the credential pick one silently at authentication time.
    ValidationUtil.validateInteractiveBrowserRedirectUrlSetup(getClass().getSimpleName(), port, redirectURL);
    ValidationUtil.validate(getClass().getSimpleName(), new HashMap<String, Object>() {{
            put("clientId", clientId);
        }});
    return new InteractiveBrowserCredential(clientId, tenantId, port, redirectURL, automaticAuthentication,
        identityClientOptions);
}
return new InteractiveBrowserCredential(clientId, tenantId, port, redirectURL, automaticAuthentication,
/**
 * Creates a new {@link InteractiveBrowserCredential} with the current configurations.
 *
 * <p>Validation happens in two steps: first the port / redirect-URL combination is checked
 * (presumably rejecting the case where both are configured, since a redirect URL can embed its
 * own port — confirm against {@code ValidationUtil}), then the mandatory {@code clientId} is
 * checked for presence.
 *
 * @return a {@link InteractiveBrowserCredential} with the current configurations.
 */
public InteractiveBrowserCredential build() {
    ValidationUtil.validateInteractiveBrowserRedirectUrlSetup(getClass().getSimpleName(), port, redirectUrl);
    ValidationUtil.validate(getClass().getSimpleName(), new HashMap<String, Object>() {{
            put("clientId", clientId);
        }});
    return new InteractiveBrowserCredential(clientId, tenantId, port, redirectUrl, automaticAuthentication,
        identityClientOptions);
}
/**
 * Fluent credential builder for instantiating an {@link InteractiveBrowserCredential}.
 */
class InteractiveBrowserCredentialBuilder extends AadCredentialBuilderBase<InteractiveBrowserCredentialBuilder> {
    private Integer port;
    // Whether getToken() may launch the browser on its own; turned off by
    // disableAutomaticAuthentication().
    private boolean automaticAuthentication = true;
    private String redirectURL;

    /**
     * Sets the port for the local HTTP server, for which {@code http://localhost:{port}} must be
     * registered as a valid reply URL on the application.
     *
     * <p>NOTE(review): the original javadoc here was truncated at "{@code http:"; the reply-URL text
     * above is reconstructed — confirm against the published library docs.</p>
     *
     * @deprecated Configure the redirect URL as {@code http://localhost:{port}} via
     * {@link InteractiveBrowserCredentialBuilder#redirectUrl(String)} instead.
     *
     * @param port the port on which the credential will listen for the browser authentication result
     * @return the InteractiveBrowserCredentialBuilder itself
     */
    @Deprecated
    public InteractiveBrowserCredentialBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Allows to use an unprotected file specified by <code>cacheFileLocation()</code> instead of
     * Gnome keyring on Linux. This is restricted by default.
     *
     * @return An updated instance of this builder.
     */
    InteractiveBrowserCredentialBuilder allowUnencryptedCache() {
        this.identityClientOptions.allowUnencryptedCache();
        return this;
    }

    /**
     * Enables the shared token cache which is disabled by default. If enabled, the credential will store tokens
     * in a cache persisted to the machine, protected to the current user, which can be shared by other credentials
     * and processes.
     *
     * @return An updated instance of this builder with the shared token cache enabled.
     */
    InteractiveBrowserCredentialBuilder enablePersistentCache() {
        this.identityClientOptions.enablePersistentCache();
        return this;
    }

    /**
     * Sets the {@link AuthenticationRecord} captured from a previous authentication.
     *
     * @param authenticationRecord The Authentication record to be configured.
     *
     * @return An updated instance of this builder with the configured authentication record.
     */
    InteractiveBrowserCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
        this.identityClientOptions.setAuthenticationRecord(authenticationRecord);
        return this;
    }

    /**
     * Sets the Redirect URL where STS will callback the application with the security code. It is required if a
     * custom client id is specified via {@link InteractiveBrowserCredentialBuilder#clientId(String)} and there is
     * no default redirect URL specified during the application registration.
     *
     * @param redirectURL the redirect URL to listen on and receive security code.
     *
     * @return An updated instance of this builder with the configured redirect URL.
     */
    public InteractiveBrowserCredentialBuilder redirectUrl(String redirectURL) {
        this.redirectURL = redirectURL;
        return this;
    }

    /**
     * Disables the automatic authentication and prevents the {@link InteractiveBrowserCredential} from automatically
     * prompting the user. If automatic authentication is disabled a {@link AuthenticationRequiredException}
     * will be thrown from {@link InteractiveBrowserCredential#getToken(TokenRequestContext)} in the case that
     * user interaction is necessary. The application is responsible for handling this exception, and
     * calling {@link InteractiveBrowserCredential#authenticate(TokenRequestContext)} or
     * {@link InteractiveBrowserCredential#authenticate()} to authenticate the user interactively.
     *
     * <p>NOTE(review): the {@code @link} targets above were truncated in this excerpt and have been
     * reconstructed — verify the exact member signatures.</p>
     *
     * @return An updated instance of this builder with automatic authentication disabled.
     */
    InteractiveBrowserCredentialBuilder disableAutomaticAuthentication() {
        this.automaticAuthentication = false;
        return this;
    }

    /**
     * Creates a new {@link InteractiveBrowserCredential} with the current configurations.
     *
     * @return a {@link InteractiveBrowserCredential} with the current configurations.
     */
    // NOTE(review): the build() implementation is not shown in this excerpt of the class.
}
/**
 * Fluent credential builder for instantiating an {@link InteractiveBrowserCredential}.
 */
class InteractiveBrowserCredentialBuilder extends AadCredentialBuilderBase<InteractiveBrowserCredentialBuilder> {
    private Integer port;
    // Whether getToken() may launch the browser on its own; turned off by
    // disableAutomaticAuthentication().
    private boolean automaticAuthentication = true;
    private String redirectUrl;

    /**
     * Sets the port for the local HTTP server, for which {@code http://localhost:{port}} must be
     * registered as a valid reply URL on the application.
     *
     * <p>NOTE(review): the original javadoc here was truncated at "{@code http:"; the reply-URL text
     * above is reconstructed — confirm against the published library docs.</p>
     *
     * @deprecated Configure the redirect URL as {@code http://localhost:{port}} via
     * {@link InteractiveBrowserCredentialBuilder#redirectUrl(String)} instead.
     *
     * @param port the port on which the credential will listen for the browser authentication result
     * @return the InteractiveBrowserCredentialBuilder itself
     */
    @Deprecated
    public InteractiveBrowserCredentialBuilder port(int port) {
        this.port = port;
        return this;
    }

    /**
     * Allows to use an unprotected file specified by <code>cacheFileLocation()</code> instead of
     * Gnome keyring on Linux. This is restricted by default.
     *
     * @return An updated instance of this builder.
     */
    public InteractiveBrowserCredentialBuilder allowUnencryptedCache() {
        this.identityClientOptions.setAllowUnencryptedCache(true);
        return this;
    }

    /**
     * Enables the shared token cache which is disabled by default. If enabled, the credential will store tokens
     * in a cache persisted to the machine, protected to the current user, which can be shared by other credentials
     * and processes.
     *
     * @return An updated instance of this builder with the shared token cache enabled.
     */
    public InteractiveBrowserCredentialBuilder enablePersistentCache() {
        this.identityClientOptions.enablePersistentCache();
        return this;
    }

    /**
     * Sets the {@link AuthenticationRecord} captured from a previous authentication.
     *
     * @param authenticationRecord The Authentication record to be configured.
     *
     * @return An updated instance of this builder with the configured authentication record.
     */
    public InteractiveBrowserCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
        this.identityClientOptions.setAuthenticationRecord(authenticationRecord);
        return this;
    }

    /**
     * Sets the Redirect URL where STS will callback the application with the security code. It is required if a
     * custom client id is specified via {@link InteractiveBrowserCredentialBuilder#clientId(String)} and there is
     * no default redirect URL specified during the application registration.
     *
     * @param redirectUrl the redirect URL to listen on and receive security code.
     *
     * @return An updated instance of this builder with the configured redirect URL.
     */
    public InteractiveBrowserCredentialBuilder redirectUrl(String redirectUrl) {
        this.redirectUrl = redirectUrl;
        return this;
    }

    /**
     * Disables the automatic authentication and prevents the {@link InteractiveBrowserCredential} from automatically
     * prompting the user. If automatic authentication is disabled a {@link AuthenticationRequiredException}
     * will be thrown from {@link InteractiveBrowserCredential#getToken(TokenRequestContext)} in the case that
     * user interaction is necessary. The application is responsible for handling this exception, and
     * calling {@link InteractiveBrowserCredential#authenticate(TokenRequestContext)} or
     * {@link InteractiveBrowserCredential#authenticate()} to authenticate the user interactively.
     *
     * <p>NOTE(review): the {@code @link} targets above were truncated in this excerpt and have been
     * reconstructed — verify the exact member signatures.</p>
     *
     * @return An updated instance of this builder with automatic authentication disabled.
     */
    public InteractiveBrowserCredentialBuilder disableAutomaticAuthentication() {
        this.automaticAuthentication = false;
        return this;
    }

    /**
     * Creates a new {@link InteractiveBrowserCredential} with the current configurations.
     *
     * @return a {@link InteractiveBrowserCredential} with the current configurations.
     */
    // NOTE(review): the build() implementation is not shown in this excerpt of the class.
}
Yeah, since Service Principal auth is supported against Az Stack from the credential, we don't need to guard against it. It'll be a rare scenario for the user to take this route.
/**
 * Asynchronously acquires a token using the credentials cached on disk by the Azure Tools for
 * IntelliJ plugin.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an {@link MsalToken}, or an error if the IntelliJ cache is
 *     unusable ({@link CredentialUnavailableException}), the authority URL is malformed, or the
 *     cache cannot be read ({@link IOException} mapped into the Mono)
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
    try {
        IntelliJCacheAccessor cacheAccessor =
            new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
        // The plugin records how the user signed in: "SP" = service principal, "DC" = device code.
        IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable();
        String authType = authDetails.getAuthMethod();
        if (authType.equalsIgnoreCase("SP")) {
            // Service principal: rebuild a confidential client from the plugin's stored
            // client id / secret / tenant and request a token directly.
            Map<String, String> spDetails = cacheAccessor
                .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
            String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
            try {
                ConfidentialClientApplication.Builder applicationBuilder =
                    ConfidentialClientApplication.builder(spDetails.get("client"),
                        ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                        .authority(authorityUrl);
                // Prefer the SDK HTTP pipeline; fall back to a plain java.net proxy if configured.
                if (httpPipelineAdapter != null) {
                    applicationBuilder.httpClient(httpPipelineAdapter);
                } else if (options.getProxyOptions() != null) {
                    applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                }
                if (options.getExecutorService() != null) {
                    applicationBuilder.executorService(options.getExecutorService());
                }
                ConfidentialClientApplication application = applicationBuilder.build();
                return Mono.fromFuture(application.acquireToken(
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .build())).map(MsalToken::new);
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        } else if (authType.equalsIgnoreCase("DC")) {
            // Device code: redeem the refresh token the plugin cached. Not possible against ADFS.
            if (isADFSTenant()) {
                return Mono.error(new CredentialUnavailableException("IntelliJCredential "
                    + "authentication unavailable. ADFS tenant/authorities are not supported."));
            }
            JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
            String refreshToken = intelliJCredentials.get("refreshToken").textValue();
            RefreshTokenParameters parameters = RefreshTokenParameters
                .builder(new HashSet<>(request.getScopes()), refreshToken)
                .build();
            return publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new));
        } else {
            // Unknown/absent auth method: the user has not signed in through the plugin.
            throw logger.logExceptionAsError(new CredentialUnavailableException(
                "IntelliJ Authentication not available."
                + " Please login with Azure Tools for IntelliJ plugin in the IDE."));
        }
    } catch (IOException e) {
        return Mono.error(e);
    }
}
+ "authentication unavailable. ADFS tenant/authorities are not supported."));
/**
 * Asynchronously acquires a token using the sign-in state cached by the Azure Tools for IntelliJ
 * plugin.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an {@link MsalToken}; emits a
 *     {@link CredentialUnavailableException} when the plugin cache cannot be used, a
 *     {@link MalformedURLException} for a bad authority URL, or an {@link IOException} when the
 *     cache cannot be read
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
    try {
        IntelliJCacheAccessor cacheAccessor =
            new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
        // "SP" = the user signed in with a service principal; "DC" = via device code flow.
        IntelliJAuthMethodDetails authDetails = cacheAccessor.getAuthDetailsIfAvailable();
        String authType = authDetails.getAuthMethod();
        if (authType.equalsIgnoreCase("SP")) {
            // Recreate a confidential client from the stored service principal details.
            Map<String, String> spDetails = cacheAccessor
                .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
            String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
            try {
                ConfidentialClientApplication.Builder applicationBuilder =
                    ConfidentialClientApplication.builder(spDetails.get("client"),
                        ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                        .authority(authorityUrl);
                // Route MSAL traffic through the SDK pipeline when available, else honor proxy options.
                if (httpPipelineAdapter != null) {
                    applicationBuilder.httpClient(httpPipelineAdapter);
                } else if (options.getProxyOptions() != null) {
                    applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                }
                if (options.getExecutorService() != null) {
                    applicationBuilder.executorService(options.getExecutorService());
                }
                ConfidentialClientApplication application = applicationBuilder.build();
                return Mono.fromFuture(application.acquireToken(
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .build())).map(MsalToken::new);
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        } else if (authType.equalsIgnoreCase("DC")) {
            // Device code sign-in: redeem the cached refresh token. ADFS authorities are unsupported.
            if (isADFSTenant()) {
                return Mono.error(new CredentialUnavailableException("IntelliJCredential "
                    + "authentication unavailable. ADFS tenant/authorities are not supported."));
            }
            JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
            String refreshToken = intelliJCredentials.get("refreshToken").textValue();
            RefreshTokenParameters parameters = RefreshTokenParameters
                .builder(new HashSet<>(request.getScopes()), refreshToken)
                .build();
            return publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new));
        } else {
            // No recognized sign-in method recorded: the user must log in through the plugin first.
            throw logger.logExceptionAsError(new CredentialUnavailableException(
                "IntelliJ Authentication not available."
                + " Please login with Azure Tools for IntelliJ plugin in the IDE."));
        }
    } catch (IOException e) {
        return Mono.error(e);
    }
}
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken( ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())) .map(MsalToken::new)); } private HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); HttpLogOptions httpLogOptions = new HttpLogOptions(); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RetryPolicy()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } /** * Asynchronously acquire a token from Active Directory with a username and a password. * * @param request the details of the token request * @param username the username of the user * @param password the password of the user * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username, String password) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder( new HashSet<>(request.getScopes()), username, password.toCharArray()).build())) .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and " + "password", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @param account the account used to login to acquire the last token * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); if (account != null) { parametersBuilder = parametersBuilder.account(account); } try { return pc.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))) .switchIfEmpty(Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())).forceRefresh(true); if (account != null) { forceParametersBuilder = forceParametersBuilder.account(account); } try { return pc.acquireTokenSilently(forceParametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(MsalToken::new))); } /** * Asynchronously acquire a token from the currently logged in client. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) { return confidentialClientApplicationAccessor.getValue() .flatMap(confidentialClient -> Mono.fromFuture(() -> { SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder( new HashSet<>(request.getScopes())); try { return confidentialClient.acquireTokenSilently(parametersBuilder.build()); } catch (MalformedURLException e) { return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e))); } }).map(ar -> (AccessToken) new MsalToken(ar)) .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))); } /** * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a * different device. 
* * @param request the details of the token request * @param deviceCodeConsumer the user provided closure that will consume the device code challenge * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device * code expires */ public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> { DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build(); return pc.acquireToken(parameters); }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t)) .map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with Visual Sutdio cached refresh token. * * @param request the details of the token request * @return a Publisher that emits an AccessToken. */ public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) { if (isADFSTenant()) { return Mono.error(new CredentialUnavailableException("VsCodeCredential " + "authentication unavailable. ADFS tenant/authorities are not supported.")); } VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor(); String credential = accessor.getCredentials("VS Code Azure", cloud); RefreshTokenParameters parameters = RefreshTokenParameters .builder(new HashSet<>(request.getScopes()), credential) .build(); return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new)); } /** * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow. 
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
* * @param request the details of the token request * @param port the port on which the HTTP server is listening * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) { URI redirectUri; try { redirectUri = new URI(HTTP_LOCALHOST + ":" + port); } catch (URISyntaxException e) { return Mono.error(logger.logExceptionAsError(new RuntimeException(e))); } InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new); } /** * Gets token from shared token cache * */ public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) { return publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts()) .onErrorMap(t -> new CredentialUnavailableException( "Cannot get accounts from token cache. Error: " + t.getMessage(), t)) .flatMap(set -> { IAccount requestedAccount; Map<String, IAccount> accounts = new HashMap<>(); if (set.isEmpty()) { return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential " + "authentication unavailable. No accounts were found in the cache.")); } for (IAccount cached : set) { if (username == null || username.equals(cached.username())) { if (!accounts.containsKey(cached.homeAccountId())) { accounts.put(cached.homeAccountId(), cached); } } } if (accounts.isEmpty()) { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. 
No account matching the specified username: %s was " + "found in the cache.", username))); } else if (accounts.size() > 1) { if (username == null) { return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication " + "unavailable. Multiple accounts were found in the cache. Use username and " + "tenant id to disambiguate.")); } else { return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential " + "authentication unavailable. Multiple accounts matching the specified username: " + "%s were found in the cache.", username))); } } else { requestedAccount = accounts.values().iterator().next(); } return authenticateWithPublicClientCache(request, requestedAccount); })); } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. * * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), 
StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch 
(Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. " + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline 
!= null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if (httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
class IdentityClient { private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); private static final String WINDOWS_STARTER = "cmd.exe"; private static final String LINUX_MAC_STARTER = "/bin/sh"; private static final String WINDOWS_SWITCHER = "/c"; private static final String LINUX_MAC_SWITCHER = "-c"; private static final String WINDOWS_PROCESS_ERROR_MESSAGE = "'az' is not recognized"; private static final String LINUX_MAC_PROCESS_ERROR_MESSAGE = "(.*)az:(.*)not found"; private static final String DEFAULT_WINDOWS_SYSTEM_ROOT = System.getenv("SystemRoot"); private static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; private static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); private static final String DEFAULT_PUBLIC_CACHE_FILE_NAME = "msal.cache"; private static final String DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME = "msal.confidential.cache"; private static final Path DEFAULT_CACHE_FILE_PATH = Platform.isWindows() ? 
Paths.get(System.getProperty("user.home"), "AppData", "Local", ".IdentityService") : Paths.get(System.getProperty("user.home"), ".IdentityService"); private static final String DEFAULT_KEYCHAIN_SERVICE = "Microsoft.Developer.IdentityService"; private static final String DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT = "MSALCache"; private static final String DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT = "MSALConfidentialCache"; private static final String DEFAULT_KEYRING_NAME = "default"; private static final String DEFAULT_KEYRING_SCHEMA = "msal.cache"; private static final String DEFAULT_PUBLIC_KEYRING_ITEM_NAME = DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT; private static final String DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME = DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT; private static final String DEFAULT_KEYRING_ATTR_NAME = "MsalClientID"; private static final String DEFAULT_KEYRING_ATTR_VALUE = "Microsoft.Developer.IdentityService"; private static final String ADFS_TENANT = "adfs"; private static final String HTTP_LOCALHOST = "http: private final ClientLogger logger = new ClientLogger(IdentityClient.class); private final IdentityClientOptions options; private final String tenantId; private final String clientId; private final String clientSecret; private final String certificatePath; private final String certificatePassword; private HttpPipelineAdapter httpPipelineAdapter; private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor; private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. 
* @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param options the options configuring the client. */ IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath, String certificatePassword, boolean isSharedTokenCacheCredential, IdentityClientOptions options) { if (tenantId == null) { tenantId = "organizations"; } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.clientSecret = clientSecret; this.certificatePath = certificatePath; this.certificatePassword = certificatePassword; this.options = options; this.publicClientApplicationAccessor = new SynchronizedAccessor<PublicClientApplication>(() -> getPublicClientApplication(isSharedTokenCacheCredential)); this.confidentialClientApplicationAccessor = new SynchronizedAccessor<ConfidentialClientApplication>(() -> getConfidentialClientApplication()); } private ConfidentialClientApplication getConfidentialClientApplication() { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificatePath != null) { try { if (certificatePassword == null) { byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(certificatePath)); credential = ClientCredentialFactory.createFromCertificate( CertificateUtil.privateKeyFromPem(pemCertificateBytes), CertificateUtil.publicKeyFromPem(pemCertificateBytes)); } else { credential = ClientCredentialFactory.createFromCertificate( new FileInputStream(certificatePath), certificatePassword); } } catch (IOException | GeneralSecurityException e) { throw 
logger.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else { throw logger.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path")); } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_CONFIDENTIAL_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_CONFIDENTIAL_KEYCHAIN_ACCOUNT); } if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_CONFIDENTIAL_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); applicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { throw logger.logExceptionAsError(new ClientAuthenticationException( "Shared token cache 
is unavailable in this environment.", null, t)); } } return applicationBuilder.build(); } private PublicClientApplication getPublicClientApplication(boolean sharedTokenCacheCredential) { if (clientId == null) { throw logger.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId; PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId); try { publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { publicClientApplicationBuilder.httpClient(httpPipelineAdapter); } else { publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { publicClientApplicationBuilder.executorService(options.getExecutorService()); } if (options.isSharedTokenCacheEnabled()) { try { PersistenceSettings.Builder persistenceSettingsBuilder = PersistenceSettings.builder( DEFAULT_PUBLIC_CACHE_FILE_NAME, DEFAULT_CACHE_FILE_PATH); if (Platform.isWindows()) { publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isMac()) { persistenceSettingsBuilder.setMacKeychain( DEFAULT_KEYCHAIN_SERVICE, DEFAULT_PUBLIC_KEYCHAIN_ACCOUNT); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } else if (Platform.isLinux()) { try { persistenceSettingsBuilder .setLinuxKeyring(DEFAULT_KEYRING_NAME, DEFAULT_KEYRING_SCHEMA, DEFAULT_PUBLIC_KEYRING_ITEM_NAME, DEFAULT_KEYRING_ATTR_NAME, DEFAULT_KEYRING_ATTR_VALUE, null, null); 
publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } catch (KeyRingAccessException e) { if (!options.getAllowUnencryptedCache()) { throw logger.logExceptionAsError(e); } persistenceSettingsBuilder.setLinuxUseUnprotectedFileAsCacheStorage(true); publicClientApplicationBuilder.setTokenCacheAccessAspect( new PersistenceTokenCacheAccessAspect(persistenceSettingsBuilder.build())); } } } catch (Throwable t) { String message = "Shared token cache is unavailable in this environment."; if (sharedTokenCacheCredential) { throw logger.logExceptionAsError(new CredentialUnavailableException(message, t)); } else { throw logger.logExceptionAsError(new ClientAuthenticationException(message, null, t)); } } } return publicClientApplicationBuilder.build(); } /** * Asynchronously acquire a token from Active Directory with Azure CLI. * * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) { String azCommand = "az account get-access-token --output json --resource "; StringBuilder command = new StringBuilder(); command.append(azCommand); String scopes = ScopeUtil.scopesToResource(request.getScopes()); try { ScopeUtil.validateScope(scopes); } catch (IllegalArgumentException ex) { return Mono.error(logger.logExceptionAsError(ex)); } command.append(scopes); AccessToken token = null; BufferedReader reader = null; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, command.toString()); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw logger.logExceptionAsError(new IllegalStateException("A 
Safe Working directory could not be" + " found to execute CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); reader = new BufferedReader(new InputStreamReader(process.getInputStream(), "UTF-8")); String line; StringBuilder output = new StringBuilder(); while (true) { line = reader.readLine(); if (line == null) { break; } if (line.startsWith(WINDOWS_PROCESS_ERROR_MESSAGE) || line.matches(LINUX_MAC_PROCESS_ERROR_MESSAGE)) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed")); } output.append(line); } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo("\"accessToken\": \"(.*?)(\"|$)", processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw logger.logExceptionAsError( new CredentialUnavailableException( "AzureCliCredential authentication unavailable." 
+ " Please run 'az login' to set up account")); } throw logger.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw logger.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(processOutput, Map.class, SerializerEncoding.JSON); String accessToken = objectMap.get("accessToken"); String time = objectMap.get("expiresOn"); String timeToSecond = time.substring(0, time.indexOf(".")); String timeJoinedWithT = String.join("T", timeToSecond.split(" ")); OffsetDateTime expiresOn = LocalDateTime.parse(timeJoinedWithT, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.systemDefault()) .toOffsetDateTime().withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } catch (IOException | InterruptedException e) { throw logger.logExceptionAsError(new IllegalStateException(e)); } catch (RuntimeException e) { return Mono.error(logger.logExceptionAsError(e)); } finally { try { if (reader != null) { reader.close(); } } catch (IOException ex) { return Mono.error(logger.logExceptionAsError(new IllegalStateException(ex))); } } return Mono.just(token); } /** * Asynchronously acquire a token from Active Directory with a client secret. 
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    // Client-credentials grant: authenticates the application itself, no user involved.
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build()))
            .map(MsalToken::new));
}

// Builds the HTTP pipeline handed to MSAL when a raw HttpClient (rather than a full
// pipeline) was configured: retry policy plus HTTP logging with default options.
private HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    HttpLogOptions httpLogOptions = new HttpLogOptions();
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RetryPolicy());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new HttpPipelineBuilder().httpClient(httpClient)
        .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}

/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request, String username,
    String password) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(UserNamePasswordParameters.builder(
            new HashSet<>(request.getScopes()), username, password.toCharArray()).build()))
            .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
                + "password", null, t))
            .map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from the currently logged in client.
 * @param request the details of the token request
 * @param account the account used to login to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            // First attempt: silent acquisition from MSAL's token cache.
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()));
            if (account != null) {
                parametersBuilder = parametersBuilder.account(account);
            }
            try {
                return pc.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            // Only accept the cached token if it is not within REFRESH_OFFSET of expiring;
            // otherwise fall through to a forced refresh below.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
            .switchIfEmpty(Mono.fromFuture(() -> {
                // Second attempt: force MSAL to bypass the cache and refresh the token.
                SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
                    new HashSet<>(request.getScopes())).forceRefresh(true);
                if (account != null) {
                    forceParametersBuilder = forceParametersBuilder.account(account);
                }
                try {
                    return pc.acquireTokenSilently(forceParametersBuilder.build());
                } catch (MalformedURLException e) {
                    return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
                }
            }).map(MsalToken::new)));
}

/**
 * Asynchronously acquire a token from the currently logged in client.
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    return confidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()));
            try {
                return confidentialClient.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(logger.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(ar -> (AccessToken) new MsalToken(ar))
            // Emit empty (no cached token) if the token is within REFRESH_OFFSET of expiry,
            // so callers fall back to a fresh acquisition. Unlike the public-client path,
            // no force-refresh retry happens here.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
}

/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return publicClientApplicationAccessor.getValue().flatMap(pc -> Mono.fromFuture(() -> {
        // MSAL calls back with the device code details; they are surfaced to the caller's
        // consumer with the expiry converted to an absolute OffsetDateTime.
        DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(
            new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept(
                new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(),
                    OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
        return pc.acquireToken(parameters);
    }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code", null, t))
        .map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token.
 *
 * @param request the details of the token request
 * @param cloud the cloud name used to look up the cached VS Code credentials
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
    // ADFS authorities are not supported by the VS Code refresh-token flow.
    if (isADFSTenant()) {
        return Mono.error(new CredentialUnavailableException("VsCodeCredential "
            + "authentication unavailable. ADFS tenant/authorities are not supported."));
    }
    VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
    String credential = accessor.getCredentials("VS Code Azure", cloud);
    RefreshTokenParameters parameters = RefreshTokenParameters
        .builder(new HashSet<>(request.getScopes()), credential)
        .build();
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parameters)).map(MsalToken::new));
}

/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
* * @param request the details of the token request * @param authorizationCode the oauth2 authorization code * @param redirectUrl the redirectUrl where the authorization code is sent to * @return a Publisher that emits an AccessToken */ public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode, URI redirectUrl) { AuthorizationCodeParameters parameters = AuthorizationCodeParameters.builder(authorizationCode, redirectUrl) .scopes(new HashSet<>(request.getScopes())) .build(); Mono<IAuthenticationResult> acquireToken; if (clientSecret != null) { acquireToken = confidentialClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } else { acquireToken = publicClientApplicationAccessor.getValue() .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters))); } return acquireToken.onErrorMap(t -> new ClientAuthenticationException( "Failed to acquire token with authorization code", null, t)).map(MsalToken::new); } /** * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The * credential will run a minimal local HttpServer at the given port, so {@code http: * listed as a valid reply URL for the application. 
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to use when neither a port is given; falls back to plain localhost
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port -> explicit redirect URL -> bare localhost.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(logger.logExceptionAsError(new RuntimeException(e)));
    }
    InteractiveRequestParameters parameters = InteractiveRequestParameters.builder(redirectUri)
        .scopes(new HashSet<>(request.getScopes()))
        .build();
    Mono<IAuthenticationResult> acquireToken = publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parameters)));
    return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
        "Failed to acquire token with Interactive Browser Authentication.", null, t)).map(MsalToken::new);
}

/**
 * Gets token from shared token cache
 *
 * @param request the details of the token request
 * @param username the username to disambiguate between multiple cached accounts; may be null
 * @return a Publisher that emits an AccessToken
 * */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    return publicClientApplicationAccessor.getValue()
        .flatMap(pc -> Mono.fromFuture(() -> pc.getAccounts())
            .onErrorMap(t -> new CredentialUnavailableException(
                "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
            .flatMap(set -> {
                IAccount requestedAccount;
                // De-duplicate cached accounts by home account id, optionally filtered by username.
                Map<String, IAccount> accounts = new HashMap<>();
                if (set.isEmpty()) {
                    return Mono.error(new CredentialUnavailableException("SharedTokenCacheCredential "
                        + "authentication unavailable. No accounts were found in the cache."));
                }
                for (IAccount cached : set) {
                    if (username == null || username.equals(cached.username())) {
                        if (!accounts.containsKey(cached.homeAccountId())) {
                            accounts.put(cached.homeAccountId(), cached);
                        }
                    }
                }
                // Exactly one matching account is required; zero or multiple matches are errors.
                if (accounts.isEmpty()) {
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. No account matching the specified username: %s was "
                        + "found in the cache.", username)));
                } else if (accounts.size() > 1) {
                    if (username == null) {
                        return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication "
                            + "unavailable. Multiple accounts were found in the cache. Use username and "
                            + "tenant id to disambiguate."));
                    } else {
                        return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                            + "authentication unavailable. Multiple accounts matching the specified username: "
                            + "%s were found in the cache.", username)));
                    }
                } else {
                    requestedAccount = accounts.values().iterator().next();
                }
                // Delegate to the silent/cached public-client flow for the selected account.
                return authenticateWithPublicClientCache(request, requestedAccount);
            }));
}

/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
* * @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret, TokenRequestContext request) { return Mono.fromCallable(() -> { String resource = ScopeUtil.scopesToResource(request.getScopes()); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&clientid="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); if (msiSecret != null) { connection.setRequestProperty("Secret", msiSecret); } connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } finally { if (connection != null) { connection.disconnect(); } } }); } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. 
* * @param request the details of the token request * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) { String resource = ScopeUtil.scopesToResource(request.getScopes()); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> { int retry = 1; while (retry <= options.getMaxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: payload.toString())); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()) .useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException exception) { if (connection == null) { throw logger.logExceptionAsError(new RuntimeException( String.format("Could not connect to the url: %s.", url), exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.getRetryTimeout() .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000; retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.getMaxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { throw logger.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from IMDS, verify your objectId, " + "clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } throw logger.logExceptionAsError(new RuntimeException( String.format("MSI: Failed to acquire tokens after retrying %s times", options.getMaxRetry()))); })); } private Mono<Boolean> checkIMDSAvailable() { StringBuilder payload = new StringBuilder(); try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); } catch (IOException exception) { return Mono.error(exception); } return Mono.fromCallable(() -> { HttpURLConnection connection = null; URL url = new URL(String.format("http: payload.toString())); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setConnectTimeout(500); connection.connect(); } catch (Exception e) { throw logger.logExceptionAsError( new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + e.getMessage() + ".", e)); } finally { if (connection != null) { connection.disconnect(); } } return true; }); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new IllegalStateException(ex); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Type.HTTP, options.getAddress()); } } private String getSafeWorkingDirectory() { if (isWindowsPlatform()) { if (CoreUtils.isNullOrEmpty(DEFAULT_WINDOWS_SYSTEM_ROOT)) { return null; } return DEFAULT_WINDOWS_SYSTEM_ROOT + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } private boolean isWindowsPlatform() { return System.getProperty("os.name").contains("Windows"); } private String redactInfo(String regex, String input) { return input.replaceAll(regex, "****"); } void openUrl(String url) throws IOException { Runtime rt = Runtime.getRuntime(); String os = System.getProperty("os.name").toLowerCase(Locale.ROOT); if (os.contains("win")) { rt.exec("rundll32 url.dll,FileProtocolHandler " + url); } else if (os.contains("mac")) { rt.exec("open " + url); } else if (os.contains("nix") || os.contains("nux")) { rt.exec("xdg-open " + url); } else { logger.error("Browser could not be opened - please open {} in a browser on this device.", url); } } private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) { CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(e); return completableFuture; } private void initializeHttpPipelineAdapter() { HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline); } else { HttpClient httpClient = options.getHttpClient(); if 
(httpClient != null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient)); } else if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault())); } } } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } private boolean isADFSTenant() { return this.tenantId.equals(ADFS_TENANT); } }
can be a map instead
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .flatMap(response -> Mono.justOrEmpty(response.getValue())); }
.flatMap(response -> Mono.justOrEmpty(response.getValue()));
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .map(DigitalTwinsResponse::getValue); }
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
space between `if` and `(`?
protected DigitalTwinsClientBuilder getDigitalTwinsClientBuilder(){ DigitalTwinsClientBuilder builder = new DigitalTwinsClientBuilder() .endpoint(DIGITALTWINS_URL) .tokenCredential(new ClientSecretCredentialBuilder() .tenantId(TENANT_ID) .clientId(CLIENT_ID) .clientSecret(CLIENT_SECRET) .build()); if(interceptorManager.isPlaybackMode()){ builder.httpClient(interceptorManager.getPlaybackClient()); return builder; } if(!interceptorManager.isLiveMode()){ builder.addPolicy(interceptorManager.getRecordPolicy()); } return builder; }
if(interceptorManager.isPlaybackMode()){
protected DigitalTwinsClientBuilder getDigitalTwinsClientBuilder() { DigitalTwinsClientBuilder builder = new DigitalTwinsClientBuilder() .endpoint(DIGITALTWINS_URL); if (interceptorManager.isPlaybackMode()){ builder.httpClient(interceptorManager.getPlaybackClient()); builder.tokenCredential(new FakeCredentials()); return builder; } if (!interceptorManager.isLiveMode()){ builder.addPolicy(interceptorManager.getRecordPolicy()); } builder.tokenCredential(new ClientSecretCredentialBuilder() .tenantId(TENANT_ID) .clientId(CLIENT_ID) .clientSecret(CLIENT_SECRET) .build()); return builder; }
class DigitalTwinsTestBase extends TestBase { protected static final String TENANT_ID = Configuration.getGlobalConfiguration() .get("TENANT_ID", "tenantId"); protected static final String CLIENT_SECRET = Configuration.getGlobalConfiguration() .get("CLIENT_SECRET", "clientSecret"); protected static final String CLIENT_ID = Configuration.getGlobalConfiguration() .get("CLIENT_ID", "clientId"); protected static final String DIGITALTWINS_URL = Configuration.getGlobalConfiguration() .get("DIGITALTWINS_URL", "https: protected DigitalTwinsClientBuilder getDigitalTwinsClientBuilder(HttpPipelinePolicy... policies){ DigitalTwinsClientBuilder builder = new DigitalTwinsClientBuilder() .endpoint(DIGITALTWINS_URL) .tokenCredential(new ClientSecretCredentialBuilder() .tenantId(TENANT_ID) .clientId(CLIENT_ID) .clientSecret(CLIENT_SECRET) .build()); if(interceptorManager.isPlaybackMode()){ builder.httpClient(interceptorManager.getPlaybackClient()); addPolicies(builder, policies); return builder; } addPolicies(builder, policies); if(!interceptorManager.isLiveMode()){ builder.addPolicy(interceptorManager.getRecordPolicy()); } return builder; } private static void addPolicies(DigitalTwinsClientBuilder builder, HttpPipelinePolicy... policies) { if (policies == null) { return; } for (HttpPipelinePolicy policy : policies) { builder.addPolicy(policy); } } }
class DigitalTwinsTestBase extends TestBase { protected static final String TENANT_ID = Configuration.getGlobalConfiguration() .get("TENANT_ID", "tenantId"); protected static final String CLIENT_SECRET = Configuration.getGlobalConfiguration() .get("CLIENT_SECRET", "clientSecret"); protected static final String CLIENT_ID = Configuration.getGlobalConfiguration() .get("CLIENT_ID", "clientId"); protected static final String DIGITALTWINS_URL = Configuration.getGlobalConfiguration() .get("DIGITALTWINS_URL", "https: protected DigitalTwinsClientBuilder getDigitalTwinsClientBuilder(HttpPipelinePolicy... policies) { DigitalTwinsClientBuilder builder = new DigitalTwinsClientBuilder() .endpoint(DIGITALTWINS_URL); if (interceptorManager.isPlaybackMode()){ builder.httpClient(interceptorManager.getPlaybackClient()); builder.tokenCredential(new FakeCredentials()); addPolicies(builder, policies); return builder; } addPolicies(builder, policies); if (!interceptorManager.isLiveMode()) { builder.addPolicy(interceptorManager.getRecordPolicy()); } builder.tokenCredential(new ClientSecretCredentialBuilder() .tenantId(TENANT_ID) .clientId(CLIENT_ID) .clientSecret(CLIENT_SECRET) .build()); return builder; } private static void addPolicies(DigitalTwinsClientBuilder builder, HttpPipelinePolicy... policies) { if (policies == null) { return; } for (HttpPipelinePolicy policy : policies) { builder.addPolicy(policy); } } static class FakeCredentials implements TokenCredential { @Override public Mono<AccessToken> getToken(TokenRequestContext tokenRequestContext) { return Mono.empty(); } } }
```java .map(DigitalTwinsResponse::getValue); ```
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .flatMap(response -> Mono.justOrEmpty(response.getValue())); }
.flatMap(response -> Mono.justOrEmpty(response.getValue()));
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .map(DigitalTwinsResponse::getValue); }
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
Use `p.getName().orElse("")` instead of `p.getName().get()` to avoid NPE.
public Object execute(final Object[] parameters) { final ReactiveCosmosParameterAccessor accessor = new ReactiveCosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); List<SqlParameter> sqlParameters = getQueryMethod().getParameters().stream() .map(p -> new SqlParameter("@" + p.getName().get(), toCosmosDbValue(parameters[p.getIndex()]))) .collect(Collectors.toList()); SqlQuerySpec querySpec = new SqlQuerySpec(query, sqlParameters); Flux<?> flux = this.operations.runQuery(querySpec, processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); return flux; }
.map(p -> new SqlParameter("@" + p.getName().get(),
public Object execute(final Object[] parameters) { final ReactiveCosmosParameterAccessor accessor = new ReactiveCosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); List<SqlParameter> sqlParameters = getQueryMethod().getParameters().stream() .map(p -> new SqlParameter("@" + p.getName().orElse(""), toCosmosDbValue(parameters[p.getIndex()]))) .collect(Collectors.toList()); SqlQuerySpec querySpec = new SqlQuerySpec(query, sqlParameters); Flux<?> flux = this.operations.runQuery(querySpec, processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); return flux; }
class StringBasedReactiveCosmosQuery extends AbstractReactiveCosmosQuery { private final String query; /** * Constructor * @param queryMethod the query method * @param dbOperations the reactive cosmos operations */ public StringBasedReactiveCosmosQuery(ReactiveCosmosQueryMethod queryMethod, ReactiveCosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotatation(); } @Override protected CosmosQuery createQuery(ReactiveCosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } }
class StringBasedReactiveCosmosQuery extends AbstractReactiveCosmosQuery { private final String query; /** * Constructor * @param queryMethod the query method * @param dbOperations the reactive cosmos operations */ public StringBasedReactiveCosmosQuery(ReactiveCosmosQueryMethod queryMethod, ReactiveCosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(ReactiveCosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } }
done
public Object execute(final Object[] parameters) { final ReactiveCosmosParameterAccessor accessor = new ReactiveCosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); List<SqlParameter> sqlParameters = getQueryMethod().getParameters().stream() .map(p -> new SqlParameter("@" + p.getName().get(), toCosmosDbValue(parameters[p.getIndex()]))) .collect(Collectors.toList()); SqlQuerySpec querySpec = new SqlQuerySpec(query, sqlParameters); Flux<?> flux = this.operations.runQuery(querySpec, processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); return flux; }
.map(p -> new SqlParameter("@" + p.getName().get(),
public Object execute(final Object[] parameters) { final ReactiveCosmosParameterAccessor accessor = new ReactiveCosmosParameterParameterAccessor(getQueryMethod(), parameters); final ResultProcessor processor = getQueryMethod().getResultProcessor().withDynamicProjection(accessor); List<SqlParameter> sqlParameters = getQueryMethod().getParameters().stream() .map(p -> new SqlParameter("@" + p.getName().orElse(""), toCosmosDbValue(parameters[p.getIndex()]))) .collect(Collectors.toList()); SqlQuerySpec querySpec = new SqlQuerySpec(query, sqlParameters); Flux<?> flux = this.operations.runQuery(querySpec, processor.getReturnedType().getDomainType(), processor.getReturnedType().getReturnedType()); return flux; }
class StringBasedReactiveCosmosQuery extends AbstractReactiveCosmosQuery { private final String query; /** * Constructor * @param queryMethod the query method * @param dbOperations the reactive cosmos operations */ public StringBasedReactiveCosmosQuery(ReactiveCosmosQueryMethod queryMethod, ReactiveCosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotatation(); } @Override protected CosmosQuery createQuery(ReactiveCosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } }
class StringBasedReactiveCosmosQuery extends AbstractReactiveCosmosQuery { private final String query; /** * Constructor * @param queryMethod the query method * @param dbOperations the reactive cosmos operations */ public StringBasedReactiveCosmosQuery(ReactiveCosmosQueryMethod queryMethod, ReactiveCosmosOperations dbOperations) { super(queryMethod, dbOperations); this.query = queryMethod.getQueryAnnotation(); } @Override protected CosmosQuery createQuery(ReactiveCosmosParameterAccessor accessor) { return null; } @Override @Override protected boolean isDeleteQuery() { return false; } @Override protected boolean isExistsQuery() { return false; } }
Can we just delete field `method`, use `stringInQueryAnnotation` instead?
public CosmosQueryMethod(Method method, RepositoryMetadata metadata, ProjectionFactory factory) { super(method, metadata, factory); this.method = method; }
this.method = method;
public CosmosQueryMethod(Method method, RepositoryMetadata metadata, ProjectionFactory factory) { super(method, metadata, factory); this.annotatedQueryValue = findAnnotatedQuery(method).orElse(null); }
class CosmosQueryMethod extends QueryMethod { private CosmosEntityMetadata<?> metadata; final Method method; /** * Creates a new {@link CosmosQueryMethod} from the given parameters. Looks up the correct query to use * for following invocations of the method given. * * @param method must not be {@literal null}. * @param metadata must not be {@literal null}. * @param factory must not be {@literal null}. */ @Override @SuppressWarnings("unchecked") public EntityMetadata<?> getEntityInformation() { final Class<Object> domainType = (Class<Object>) getDomainClass(); final CosmosEntityInformation<Object, String> entityInformation = new CosmosEntityInformation<Object, String>(domainType); this.metadata = new SimpleCosmosEntityMetadata<Object>(domainType, entityInformation); return this.metadata; } /** * Returns whether the method has an annotated query. * @return if the query method has an annotated query */ public boolean hasAnnotatedQuery() { return findAnnotatedQuery().isPresent(); } /** * Returns the query string declared in a {@link Query} annotation or {@literal null} if neither the annotation * found * nor the attribute was specified. * * @return the query string or null */ @Nullable public String getQueryAnnotation() { return findAnnotatedQuery().orElse(null); } private Optional<String> findAnnotatedQuery() { return lookupQueryAnnotation() .map(Query::value) .filter(StringUtils::hasText); } private Optional<Query> lookupQueryAnnotation() { return Optional.ofNullable(AnnotatedElementUtils.findMergedAnnotation(method, Query.class)); } }
class CosmosQueryMethod extends QueryMethod { private CosmosEntityMetadata<?> metadata; private final String annotatedQueryValue; /** * Creates a new {@link CosmosQueryMethod} from the given parameters. Looks up the correct query to use * for following invocations of the method given. * * @param method must not be {@literal null}. * @param metadata must not be {@literal null}. * @param factory must not be {@literal null}. */ @Override @SuppressWarnings("unchecked") public EntityMetadata<?> getEntityInformation() { final Class<Object> domainType = (Class<Object>) getDomainClass(); final CosmosEntityInformation<Object, String> entityInformation = new CosmosEntityInformation<Object, String>(domainType); this.metadata = new SimpleCosmosEntityMetadata<Object>(domainType, entityInformation); return this.metadata; } /** * Returns whether the method has an annotated query. * * @return if the query method has an annotated query */ public boolean hasAnnotatedQuery() { return annotatedQueryValue != null; } /** * Returns the query string declared in a {@link Query} annotation or {@literal null} if neither the annotation * found * nor the attribute was specified. * * @return the query string or null */ @Nullable public String getQueryAnnotation() { return annotatedQueryValue; } private Optional<String> findAnnotatedQuery(Method method) { return Optional.ofNullable(AnnotatedElementUtils.findMergedAnnotation(method, Query.class)) .map(Query::value) .filter(StringUtils::hasText); } }
Refactored
public CosmosQueryMethod(Method method, RepositoryMetadata metadata, ProjectionFactory factory) { super(method, metadata, factory); this.method = method; }
this.method = method;
public CosmosQueryMethod(Method method, RepositoryMetadata metadata, ProjectionFactory factory) { super(method, metadata, factory); this.annotatedQueryValue = findAnnotatedQuery(method).orElse(null); }
class CosmosQueryMethod extends QueryMethod { private CosmosEntityMetadata<?> metadata; final Method method; /** * Creates a new {@link CosmosQueryMethod} from the given parameters. Looks up the correct query to use * for following invocations of the method given. * * @param method must not be {@literal null}. * @param metadata must not be {@literal null}. * @param factory must not be {@literal null}. */ @Override @SuppressWarnings("unchecked") public EntityMetadata<?> getEntityInformation() { final Class<Object> domainType = (Class<Object>) getDomainClass(); final CosmosEntityInformation<Object, String> entityInformation = new CosmosEntityInformation<Object, String>(domainType); this.metadata = new SimpleCosmosEntityMetadata<Object>(domainType, entityInformation); return this.metadata; } /** * Returns whether the method has an annotated query. * @return if the query method has an annotated query */ public boolean hasAnnotatedQuery() { return findAnnotatedQuery().isPresent(); } /** * Returns the query string declared in a {@link Query} annotation or {@literal null} if neither the annotation * found * nor the attribute was specified. * * @return the query string or null */ @Nullable public String getQueryAnnotation() { return findAnnotatedQuery().orElse(null); } private Optional<String> findAnnotatedQuery() { return lookupQueryAnnotation() .map(Query::value) .filter(StringUtils::hasText); } private Optional<Query> lookupQueryAnnotation() { return Optional.ofNullable(AnnotatedElementUtils.findMergedAnnotation(method, Query.class)); } }
class CosmosQueryMethod extends QueryMethod { private CosmosEntityMetadata<?> metadata; private final String annotatedQueryValue; /** * Creates a new {@link CosmosQueryMethod} from the given parameters. Looks up the correct query to use * for following invocations of the method given. * * @param method must not be {@literal null}. * @param metadata must not be {@literal null}. * @param factory must not be {@literal null}. */ @Override @SuppressWarnings("unchecked") public EntityMetadata<?> getEntityInformation() { final Class<Object> domainType = (Class<Object>) getDomainClass(); final CosmosEntityInformation<Object, String> entityInformation = new CosmosEntityInformation<Object, String>(domainType); this.metadata = new SimpleCosmosEntityMetadata<Object>(domainType, entityInformation); return this.metadata; } /** * Returns whether the method has an annotated query. * * @return if the query method has an annotated query */ public boolean hasAnnotatedQuery() { return annotatedQueryValue != null; } /** * Returns the query string declared in a {@link Query} annotation or {@literal null} if neither the annotation * found * nor the attribute was specified. * * @return the query string or null */ @Nullable public String getQueryAnnotation() { return annotatedQueryValue; } private Optional<String> findAnnotatedQuery(Method method) { return Optional.ofNullable(AnnotatedElementUtils.findMergedAnnotation(method, Query.class)) .map(Query::value) .filter(StringUtils::hasText); } }
So are these two basically the same then? ```java .map(response -> response.getValue()); ``` ```java .flatMap(response -> Mono.justOrEmpty(response.getValue())); ```
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .flatMap(response -> Mono.justOrEmpty(response.getValue())); }
.flatMap(response -> Mono.justOrEmpty(response.getValue()));
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .map(DigitalTwinsResponse::getValue); }
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
Yes, they are the same. The first one will take the response emitted by the previous Mono, transform it to `.getValue()` and emit the result. The second one will take the response emitted by the previous Mono, transform it into `.getValue()`, put it into a Mono, flatten it and emit the result.
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .flatMap(response -> Mono.justOrEmpty(response.getValue())); }
.flatMap(response -> Mono.justOrEmpty(response.getValue()));
public Mono<String> getComponent(String digitalTwinId, String componentPath) { return getComponentWithResponse(digitalTwinId, componentPath) .map(DigitalTwinsResponse::getValue); }
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}
class to convert the relationship to. * @param <T> The generic type to convert the relationship to. * @return A {@link PagedFlux}