comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Sure
public void configurePhoneNumber() { PhoneNumber phoneNumber = new PhoneNumber("PHONENUMBER_TO_CONFIGURE"); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("APPLICATION_ID"); pstnConfiguration.setAzurePstnTargetId("AZURE_PSTN_TARGET_ID"); pstnConfiguration.setCallbackUrl("CALLBACK_URL"); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); phoneNumberClient.configureNumber(phoneNumber, pstnConfiguration); } catch (Exception e) { e.printStackTrace(); } }
pstnConfiguration.setAzurePstnTargetId("AZURE_PSTN_TARGET_ID");
public void configurePhoneNumber() { PhoneNumber phoneNumber = new PhoneNumber("PHONENUMBER_TO_CONFIGURE"); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("APPLICATION_ID"); pstnConfiguration.setCallbackUrl("CALLBACK_URL"); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); phoneNumberClient.configureNumber(phoneNumber, pstnConfiguration); } catch (Exception e) { e.printStackTrace(); } }
class ReadmeSamples { /** * Sample code for creating a sync Communication Identity Client. * * @return the Communication Identity Client. * @throws NoSuchAlgorithmException if Communication Client Credential HMAC not available * @throws InvalidKeyException if Communication Client Credential access key is not valid */ public CommunicationIdentityClient createCommunicationIdentityClient() throws InvalidKeyException, NoSuchAlgorithmException { String endpoint = "https: String accessToken = "SECRET"; HttpClient httpClient = new NettyAsyncHttpClientBuilder().build(); CommunicationIdentityClient communicationIdentityClient = new CommunicationIdentityClientBuilder() .endpoint(endpoint) .credential(new CommunicationClientCredential(accessToken)) .httpClient(httpClient) .buildClient(); return communicationIdentityClient; } /** * Sample code for creating a user * * @return the created user */ public CommunicationUser createNewUser() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); System.out.println("User id: " + user.getId()); return user; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code for issuing a user token * * @return the issued user token */ public CommunicationUserToken issueUserToken() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); List<String> scopes = new ArrayList<>(Arrays.asList("chat")); CommunicationUserToken userToken = communicationIdentityClient.issueToken(user, scopes); System.out.println("Token: " + userToken.getToken()); System.out.println("Expires On: " + userToken.getExpiresOn()); return userToken; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code for revoking user token */ public void revokeUserToken() { try { CommunicationIdentityClient 
communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = createNewUser(); List<String> scopes = new ArrayList<>(Arrays.asList("chat")); communicationIdentityClient.issueToken(user, scopes); communicationIdentityClient.revokeTokens(user, OffsetDateTime.now()); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code for deleting user */ public void deleteUser() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); communicationIdentityClient.deleteUser(user); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code for creating a sync Phone Number Client. * * @return the Phone Number Client. * @throws NoSuchAlgorithmException if Communication Client Credential HMAC not available * @throws InvalidKeyException if Communication Client Credential access key is not valid */ public PhoneNumberClient createPhoneNumberClient() throws NoSuchAlgorithmException, InvalidKeyException { String endpoint = "https: String accessToken = "SECRET"; HttpClient httpClient = new NettyAsyncHttpClientBuilder().build(); PhoneNumberClient phoneNumberClient = new PhoneNumberClientBuilder() .endpoint(endpoint) .credential(new CommunicationClientCredential(accessToken)) .httpClient(httpClient) .buildClient(); return phoneNumberClient; } /** * Sample code to get all supported countries * * @return supported countries */ public PagedIterable<PhoneNumberCountry> getSupportedCountries() { String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhoneNumberCountry> phoneNumberCountries = phoneNumberClient .listAllSupportedCountries(locale); for (PhoneNumberCountry phoneNumberCountry : phoneNumberCountries) { System.out.println("Phone Number Country Code: " + phoneNumberCountry.getCountryCode()); System.out.println("Phone Number Country Name: " + 
phoneNumberCountry.getLocalizedName()); } return phoneNumberCountries; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all acquired phone numbers * * @return the acquired phone numbers */ public PagedIterable<AcquiredPhoneNumber> getAcquiredPhoneNumbers() { String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<AcquiredPhoneNumber> acquiredPhoneNumbers = phoneNumberClient .listAllPhoneNumbers(locale); for (AcquiredPhoneNumber acquiredPhoneNumber : acquiredPhoneNumbers) { System.out.println("Acquired Phone Number: " + acquiredPhoneNumber.getPhoneNumber()); } return acquiredPhoneNumbers; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all phone plan groups * * @return phone plans groups */ public PagedIterable<PhonePlanGroup> getPhonePlanGroups() { String countryCode = "US"; String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhonePlanGroup> phonePlanGroups = phoneNumberClient .listPhonePlanGroups(countryCode, locale, true); for (PhonePlanGroup phonePlanGroup : phonePlanGroups) { System.out.println("Phone Plan GroupId: " + phonePlanGroup.getPhonePlanGroupId()); System.out.println("Phone Plan NumberType: " + phonePlanGroup.getPhoneNumberType()); } return phonePlanGroups; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all phone plan instances in a group * * @return phone plans */ public PagedIterable<PhonePlan> getPhonePlansInGroup() { String countryCode = "US"; String locale = "en-us"; String phonePlanGroupId = "PHONE_PLAN_GROUP_ID"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhonePlan> phonePlans = phoneNumberClient .listPhonePlans(countryCode, phonePlanGroupId, locale); for (PhonePlan phonePlan : phonePlans) { System.out.println("Phone Plan Id: " + 
phonePlan.getPhonePlanId()); System.out.println("Phone Plan Name: " + phonePlan.getLocalizedName()); System.out.println("Phone Plan Capabilities: " + phonePlan.getCapabilities()); System.out.println("Phone Plan Area Codes: " + phonePlan.getAreaCodes()); } return phonePlans; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get the location options for a phone plan * * @return Location Options for a phone plan */ public LocationOptions getPhonePlanLocationOptions() { String countryCode = "US"; String locale = "en-us"; String phonePlanGroupId = "PHONE_PLAN_GROUP_ID"; String phonePlanId = "PHONE_PLAN_ID"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); LocationOptions locationOptions = phoneNumberClient .getPhonePlanLocationOptions(countryCode, phonePlanGroupId, phonePlanId, locale) .getLocationOptions(); System.out.println("Getting LocationOptions for: " + locationOptions.getLabelId()); for (LocationOptionsDetails locationOptionsDetails : locationOptions.getOptions()) { System.out.println(locationOptionsDetails.getValue()); for (LocationOptions locationOptions1 : locationOptionsDetails.getLocationOptions()) { System.out.println("Getting LocationOptions for: " + locationOptions1.getLabelId()); for (LocationOptionsDetails locationOptionsDetails1 : locationOptions1.getOptions()) { System.out.println(locationOptionsDetails1.getValue()); } } } return locationOptions; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get the area codes for a location * * @return Area Codes for a location */ public AreaCodes getAreaCodes() { String countryCode = "US"; String phonePlanId = "PHONE_PLAN_ID"; List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue("LOCATION_OPTION_STATE"); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); 
query.setOptionsValue("LOCATION_OPTION_CITY"); locationOptions.add(query); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); AreaCodes areaCodes = phoneNumberClient .getAllAreaCodes("selection", countryCode, phonePlanId, locationOptions); for (String areaCode : areaCodes.getPrimaryAreaCodes()) { System.out.println(areaCode); } return areaCodes; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to create a phone number search * * @return PhoneNumberSearch for the phone plan */ public PhoneNumberSearch createPhoneNumberSearch() { String phonePlanId = "PHONE_PLAN_ID"; List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(phonePlanId); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode("AREA_CODE_FOR_SEARCH") .setDescription("DESCRIPTION_FOR_SEARCH") .setDisplayName("NAME_FOR_SEARCH") .setPhonePlanIds(phonePlanIds) .setQuantity(2); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); CreateSearchResponse createSearchResponse = phoneNumberClient.createSearch(createSearchOptions); System.out.println("SearchId: " + createSearchResponse.getSearchId()); PhoneNumberSearch phoneNumberSearch = phoneNumberClient.getSearchById(createSearchResponse.getSearchId()); for (String phoneNumber : phoneNumberSearch.getPhoneNumbers()) { System.out.println("Phone Number: " + phoneNumber); } return phoneNumberSearch; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to purchase a phone number search */ public void purchasePhoneNumberSearch() { String phoneNumberSearchId = "SEARCH_ID_TO_PURCHASE"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); phoneNumberClient.purchaseSearch(phoneNumberSearchId); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code to configure a phone number */ }
class ReadmeSamples { /** * Sample code for creating a sync Communication Identity Client. * * @return the Communication Identity Client. * @throws NoSuchAlgorithmException if Communication Client Credential HMAC not available * @throws InvalidKeyException if Communication Client Credential access key is not valid */ public CommunicationIdentityClient createCommunicationIdentityClient() throws InvalidKeyException, NoSuchAlgorithmException { String endpoint = "https: String accessToken = "SECRET"; HttpClient httpClient = new NettyAsyncHttpClientBuilder().build(); CommunicationIdentityClient communicationIdentityClient = new CommunicationIdentityClientBuilder() .endpoint(endpoint) .credential(new CommunicationClientCredential(accessToken)) .httpClient(httpClient) .buildClient(); return communicationIdentityClient; } /** * Sample code for creating a user * * @return the created user */ public CommunicationUser createNewUser() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); System.out.println("User id: " + user.getId()); return user; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code for issuing a user token * * @return the issued user token */ public CommunicationUserToken issueUserToken() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); List<String> scopes = new ArrayList<>(Arrays.asList("chat")); CommunicationUserToken userToken = communicationIdentityClient.issueToken(user, scopes); System.out.println("Token: " + userToken.getToken()); System.out.println("Expires On: " + userToken.getExpiresOn()); return userToken; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code for revoking user token */ public void revokeUserToken() { try { CommunicationIdentityClient 
communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = createNewUser(); List<String> scopes = new ArrayList<>(Arrays.asList("chat")); communicationIdentityClient.issueToken(user, scopes); communicationIdentityClient.revokeTokens(user, OffsetDateTime.now()); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code for deleting user */ public void deleteUser() { try { CommunicationIdentityClient communicationIdentityClient = createCommunicationIdentityClient(); CommunicationUser user = communicationIdentityClient.createUser(); communicationIdentityClient.deleteUser(user); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code for creating a sync Phone Number Client. * * @return the Phone Number Client. * @throws NoSuchAlgorithmException if Communication Client Credential HMAC not available * @throws InvalidKeyException if Communication Client Credential access key is not valid */ public PhoneNumberClient createPhoneNumberClient() throws NoSuchAlgorithmException, InvalidKeyException { String endpoint = "https: String accessToken = "SECRET"; HttpClient httpClient = new NettyAsyncHttpClientBuilder().build(); PhoneNumberClient phoneNumberClient = new PhoneNumberClientBuilder() .endpoint(endpoint) .credential(new CommunicationClientCredential(accessToken)) .httpClient(httpClient) .buildClient(); return phoneNumberClient; } /** * Sample code to get all supported countries * * @return supported countries */ public PagedIterable<PhoneNumberCountry> getSupportedCountries() { String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhoneNumberCountry> phoneNumberCountries = phoneNumberClient .listAllSupportedCountries(locale); for (PhoneNumberCountry phoneNumberCountry : phoneNumberCountries) { System.out.println("Phone Number Country Code: " + phoneNumberCountry.getCountryCode()); System.out.println("Phone Number Country Name: " + 
phoneNumberCountry.getLocalizedName()); } return phoneNumberCountries; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all acquired phone numbers * * @return the acquired phone numbers */ public PagedIterable<AcquiredPhoneNumber> getAcquiredPhoneNumbers() { String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<AcquiredPhoneNumber> acquiredPhoneNumbers = phoneNumberClient .listAllPhoneNumbers(locale); for (AcquiredPhoneNumber acquiredPhoneNumber : acquiredPhoneNumbers) { System.out.println("Acquired Phone Number: " + acquiredPhoneNumber.getPhoneNumber()); } return acquiredPhoneNumbers; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all phone plan groups * * @return phone plans groups */ public PagedIterable<PhonePlanGroup> getPhonePlanGroups() { String countryCode = "US"; String locale = "en-us"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhonePlanGroup> phonePlanGroups = phoneNumberClient .listPhonePlanGroups(countryCode, locale, true); for (PhonePlanGroup phonePlanGroup : phonePlanGroups) { System.out.println("Phone Plan GroupId: " + phonePlanGroup.getPhonePlanGroupId()); System.out.println("Phone Plan NumberType: " + phonePlanGroup.getPhoneNumberType()); } return phonePlanGroups; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get a list of all phone plan instances in a group * * @return phone plans */ public PagedIterable<PhonePlan> getPhonePlansInGroup() { String countryCode = "US"; String locale = "en-us"; String phonePlanGroupId = "PHONE_PLAN_GROUP_ID"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); PagedIterable<PhonePlan> phonePlans = phoneNumberClient .listPhonePlans(countryCode, phonePlanGroupId, locale); for (PhonePlan phonePlan : phonePlans) { System.out.println("Phone Plan Id: " + 
phonePlan.getPhonePlanId()); System.out.println("Phone Plan Name: " + phonePlan.getLocalizedName()); System.out.println("Phone Plan Capabilities: " + phonePlan.getCapabilities()); System.out.println("Phone Plan Area Codes: " + phonePlan.getAreaCodes()); } return phonePlans; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get the location options for a phone plan * * @return Location Options for a phone plan */ public LocationOptions getPhonePlanLocationOptions() { String countryCode = "US"; String locale = "en-us"; String phonePlanGroupId = "PHONE_PLAN_GROUP_ID"; String phonePlanId = "PHONE_PLAN_ID"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); LocationOptions locationOptions = phoneNumberClient .getPhonePlanLocationOptions(countryCode, phonePlanGroupId, phonePlanId, locale) .getLocationOptions(); System.out.println("Getting LocationOptions for: " + locationOptions.getLabelId()); for (LocationOptionsDetails locationOptionsDetails : locationOptions.getOptions()) { System.out.println(locationOptionsDetails.getValue()); for (LocationOptions locationOptions1 : locationOptionsDetails.getLocationOptions()) { System.out.println("Getting LocationOptions for: " + locationOptions1.getLabelId()); for (LocationOptionsDetails locationOptionsDetails1 : locationOptions1.getOptions()) { System.out.println(locationOptionsDetails1.getValue()); } } } return locationOptions; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to get the area codes for a location * * @return Area Codes for a location */ public AreaCodes getAreaCodes() { String countryCode = "US"; String phonePlanId = "PHONE_PLAN_ID"; List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue("LOCATION_OPTION_STATE"); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); 
query.setOptionsValue("LOCATION_OPTION_CITY"); locationOptions.add(query); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); AreaCodes areaCodes = phoneNumberClient .getAllAreaCodes("selection", countryCode, phonePlanId, locationOptions); for (String areaCode : areaCodes.getPrimaryAreaCodes()) { System.out.println(areaCode); } return areaCodes; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to create a phone number search * * @return PhoneNumberSearch for the phone plan */ public PhoneNumberSearch createPhoneNumberSearch() { String phonePlanId = "PHONE_PLAN_ID"; List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(phonePlanId); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode("AREA_CODE_FOR_SEARCH") .setDescription("DESCRIPTION_FOR_SEARCH") .setDisplayName("NAME_FOR_SEARCH") .setPhonePlanIds(phonePlanIds) .setQuantity(2); try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); CreateSearchResponse createSearchResponse = phoneNumberClient.createSearch(createSearchOptions); System.out.println("SearchId: " + createSearchResponse.getSearchId()); PhoneNumberSearch phoneNumberSearch = phoneNumberClient.getSearchById(createSearchResponse.getSearchId()); for (String phoneNumber : phoneNumberSearch.getPhoneNumbers()) { System.out.println("Phone Number: " + phoneNumber); } return phoneNumberSearch; } catch (Exception e) { e.printStackTrace(); } return null; } /** * Sample code to purchase a phone number search */ public void purchasePhoneNumberSearch() { String phoneNumberSearchId = "SEARCH_ID_TO_PURCHASE"; try { PhoneNumberClient phoneNumberClient = createPhoneNumberClient(); phoneNumberClient.purchaseSearch(phoneNumberSearchId); } catch (Exception e) { e.printStackTrace(); } } /** * Sample code to configure a phone number */ }
This can be simplified. ```suggestion List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); ```
public KeyVaultAccessControlAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } if (pipeline != null) { return new KeyVaultAccessControlAsyncClient(vaultUrl, pipeline); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); this.policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { Iterable<Header> headers = clientOptions.getHeaders(); if (headers.iterator().hasNext()) { List<HttpHeader> httpHeaderList = new ArrayList<>(); headers.forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultAccessControlAsyncClient(vaultUrl, buildPipeline); }
}
public KeyVaultAccessControlAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } if (pipeline != null) { return new KeyVaultAccessControlAsyncClient(vaultUrl, pipeline); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); this.policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultAccessControlAsyncClient(vaultUrl, buildPipeline); }
class KeyVaultAccessControlClientBuilder { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultAccessControlClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; /** * Creates a {@link KeyVaultAccessControlClientBuilder} instance that is able to configure and construct * instances of {@link KeyVaultAccessControlClient} and {@link KeyVaultAccessControlAsyncClient}. */ public KeyVaultAccessControlClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultAccessControlClient} based on options set in the Builder. Every time {@code * buildClient()} is called a new instance of {@link KeyVaultAccessControlClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return An {@link KeyVaultAccessControlClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ public KeyVaultAccessControlClient buildClient() { return new KeyVaultAccessControlClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultAccessControlAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultAccessControlAsyncClient} is created. * <p> * If {@link * {@link * other builder settings are ignored. 
* * @return An {@link KeyVaultAccessControlAsyncClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws IllegalArgumentException If {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ public KeyVaultAccessControlClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code credential} is {@code null}. */ public KeyVaultAccessControlClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after and {@link KeyVaultAccessControlClient} * {@link KeyVaultAccessControlAsyncClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. 
* @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public KeyVaultAccessControlClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public KeyVaultAccessControlClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link KeyVaultAccessControlClientBuilder * or {@link KeyVaultAccessControlAsyncClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. 
* * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If the specified {@code retryPolicy} is null. */ public KeyVaultAccessControlClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the various {@link ClientOptions options} to be set on this client. * * @param clientOptions the {@link ClientOptions} to be set on this client. * @return The updated KeyVaultAccessControlClientBuilder object. */ public KeyVaultAccessControlClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
class KeyVaultAccessControlClientBuilder { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultAccessControlClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; /** * Creates a {@link KeyVaultAccessControlClientBuilder} instance that is able to configure and construct * instances of {@link KeyVaultAccessControlClient} and {@link KeyVaultAccessControlAsyncClient}. */ public KeyVaultAccessControlClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultAccessControlClient} based on options set in the Builder. Every time {@code * buildClient()} is called a new instance of {@link KeyVaultAccessControlClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return An {@link KeyVaultAccessControlClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ public KeyVaultAccessControlClient buildClient() { return new KeyVaultAccessControlClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultAccessControlAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultAccessControlAsyncClient} is created. * <p> * If {@link * {@link * other builder settings are ignored. 
* * @return An {@link KeyVaultAccessControlAsyncClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws IllegalArgumentException If {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ public KeyVaultAccessControlClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code credential} is {@code null}. */ public KeyVaultAccessControlClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after and {@link KeyVaultAccessControlClient} * {@link KeyVaultAccessControlAsyncClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. 
* @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public KeyVaultAccessControlClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public KeyVaultAccessControlClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link KeyVaultAccessControlClientBuilder * or {@link KeyVaultAccessControlAsyncClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. 
* * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * @throws NullPointerException If the specified {@code retryPolicy} is null. */ public KeyVaultAccessControlClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. * @return The updated KeyVaultAccessControlClientBuilder object. */ public KeyVaultAccessControlClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
Don't need this check. `forEach()` will be executed only if there are elements in the list. Update this in other builders too.
public KeyVaultBackupAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } if (pipeline != null) { return new KeyVaultBackupAsyncClient(vaultUrl, pipeline); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); this.policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { Iterable<Header> headers = clientOptions.getHeaders(); if (headers.iterator().hasNext()) { List<HttpHeader> httpHeaderList = new ArrayList<>(); headers.forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultBackupAsyncClient(vaultUrl, buildPipeline); }
if (headers.iterator().hasNext()) {
public KeyVaultBackupAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } if (pipeline != null) { return new KeyVaultBackupAsyncClient(vaultUrl, pipeline); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); this.policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultBackupAsyncClient(vaultUrl, buildPipeline); }
class KeyVaultBackupClientBuilder { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultBackupClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; /** * Creates a {@link KeyVaultBackupClientBuilder} instance that is able to configure and construct instances of * {@link KeyVaultBackupClient} and {@link KeyVaultBackupAsyncClient}. */ public KeyVaultBackupClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultBackupClient} based on options set in the Builder. Every time {@code buildClient()} * is called a new instance of {@link KeyVaultBackupClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return A {@link KeyVaultBackupClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ public KeyVaultBackupClient buildClient() { return new KeyVaultBackupClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultBackupAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultBackupAsyncClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return A {@link KeyVaultBackupAsyncClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. 
*/ /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws IllegalArgumentException If {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ public KeyVaultBackupClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code credential} is {@code null}. */ public KeyVaultBackupClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after and {@link KeyVaultBackupClient} * {@link KeyVaultBackupAsyncClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public KeyVaultBackupClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public KeyVaultBackupClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link KeyVaultBackupClientBuilder * {@link KeyVaultBackupAsyncClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. * * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If the specified {@code retryPolicy} is null. 
*/ public KeyVaultBackupClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the various {@link ClientOptions options} to be set on this client. * * @param clientOptions the {@link ClientOptions} to be set on this client. * @return The updated KeyVaultBackupClientBuilder object. */ public KeyVaultBackupClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
class KeyVaultBackupClientBuilder { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultBackupClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private ClientOptions clientOptions; /** * Creates a {@link KeyVaultBackupClientBuilder} instance that is able to configure and construct instances of * {@link KeyVaultBackupClient} and {@link KeyVaultBackupAsyncClient}. */ public KeyVaultBackupClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultBackupClient} based on options set in the Builder. Every time {@code buildClient()} * is called a new instance of {@link KeyVaultBackupClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return A {@link KeyVaultBackupClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ public KeyVaultBackupClient buildClient() { return new KeyVaultBackupClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultBackupAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultBackupAsyncClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return A {@link KeyVaultBackupAsyncClient} with the options set from the builder. * @throws NullPointerException If {@code vaultUrl} is {@code null}. 
*/ /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws IllegalArgumentException If {@code vaultUrl} is null or it cannot be parsed into a valid URL. */ public KeyVaultBackupClientBuilder vaultUrl(String vaultUrl) { try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsWarning( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code credential} is {@code null}. */ public KeyVaultBackupClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after and {@link KeyVaultBackupClient} * {@link KeyVaultBackupAsyncClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public KeyVaultBackupClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If {@code client} is {@code null}. */ public KeyVaultBackupClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link KeyVaultBackupClientBuilder * {@link KeyVaultBackupAsyncClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * @return The updated {@link KeyVaultBackupClientBuilder} object. */ public KeyVaultBackupClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. * * @param retryPolicy User's retry policy applied to each request. * @return The updated {@link KeyVaultBackupClientBuilder} object. * @throws NullPointerException If the specified {@code retryPolicy} is null. 
*/ public KeyVaultBackupClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. * @return The updated KeyVaultBackupClientBuilder object. */ public KeyVaultBackupClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
any reason this is preferred over `cause instance of ClosedChannelException` ?
public void onException(final RxDocumentServiceRequest request, Throwable exception) { checkNotNull(request, "expect non-null request"); checkNotNull(exception, "expect non-null exception"); if (exception instanceof GoneException) { final Throwable cause = exception.getCause(); if (cause != null) { if (cause instanceof IOException) { final Class<?> type = cause.getClass(); if (type == ClosedChannelException.class) { this.onConnectionEvent(RntbdConnectionEvent.READ_EOF, request, exception); } else { if (logger.isDebugEnabled()) { logger.debug("Will not raise the connection state change event for error {}", cause); } } } } } }
if (type == ClosedChannelException.class) {
public void onException(final RxDocumentServiceRequest request, Throwable exception) { checkNotNull(request, "expect non-null request"); checkNotNull(exception, "expect non-null exception"); if (exception instanceof GoneException) { final Throwable cause = exception.getCause(); if (cause != null) { if (cause instanceof IOException) { if (cause instanceof ClosedChannelException) { this.onConnectionEvent(RntbdConnectionEvent.READ_EOF, request, exception); } else { if (logger.isDebugEnabled()) { logger.debug("Will not raise the connection state change event for error {}", cause); } } } } } }
class RntbdConnectionStateListener { private static final Logger logger = LoggerFactory.getLogger(RntbdConnectionStateListener.class); private final IAddressResolver addressResolver; private final RntbdEndpoint endpoint; private final Set<PartitionKeyRangeIdentity> partitionAddressCache; private final AtomicBoolean updatingAddressCache = new AtomicBoolean(false); public RntbdConnectionStateListener(final IAddressResolver addressResolver, final RntbdEndpoint endpoint) { this.addressResolver = checkNotNull(addressResolver, "expected non-null addressResolver"); this.endpoint = checkNotNull(endpoint, "expected non-null endpoint"); this.partitionAddressCache = ConcurrentHashMap.newKeySet(); } public void updateConnectionState(final RxDocumentServiceRequest request) { checkNotNull("expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = this.getPartitionKeyRangeIdentity(request); checkNotNull(partitionKeyRangeIdentity, "expected non-null partitionKeyRangeIdentity"); this.partitionAddressCache.add(partitionKeyRangeIdentity); if (logger.isDebugEnabled()) { logger.debug( "updateConnectionState({\"time\":{},\"endpoint\":{},\"partitionKeyRangeIdentity\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(endpoint), RntbdObjectMapper.toJson(partitionKeyRangeIdentity)); } } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(final RxDocumentServiceRequest request) { checkNotNull(request, "expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = request.getPartitionKeyRangeIdentity(); if (partitionKeyRangeIdentity == null) { final String partitionKeyRange = checkNotNull( request.requestContext.resolvedPartitionKeyRange, "expected non-null resolvedPartitionKeyRange").getId(); final String collectionRid = request.requestContext.resolvedCollectionRid; partitionKeyRangeIdentity = collectionRid != null ? 
new PartitionKeyRangeIdentity(collectionRid, partitionKeyRange) : new PartitionKeyRangeIdentity(partitionKeyRange); } return partitionKeyRangeIdentity; } private void onConnectionEvent(final RntbdConnectionEvent event, final RxDocumentServiceRequest request, final Throwable exception) { checkNotNull(request, "expected non-null exception"); checkNotNull(exception, "expected non-null exception"); if (event == RntbdConnectionEvent.READ_EOF) { if (!this.endpoint.isClosed()) { if (logger.isDebugEnabled()) { logger.debug("onConnectionEvent({\"event\":{},\"time\":{},\"endpoint\":{},\"cause\":{})", event, RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(exception)); } this.updateAddressCache(request); } } } private void updateAddressCache(final RxDocumentServiceRequest request) { try{ if (this.updatingAddressCache.compareAndSet(false, true)) { if (logger.isDebugEnabled()) { logger.debug( "updateAddressCache ({\"time\":{},\"endpoint\":{},\"partitionAddressCache\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(this.partitionAddressCache)); } this.addressResolver.remove(request, this.partitionAddressCache); this.partitionAddressCache.clear(); } } finally { this.updatingAddressCache.set(false); } } }
class RntbdConnectionStateListener {

    private static final Logger logger = LoggerFactory.getLogger(RntbdConnectionStateListener.class);

    private final IAddressResolver addressResolver;
    private final RntbdEndpoint endpoint;

    // Partition key ranges observed on this endpoint; when the connection breaks, every
    // cached address for these ranges is evicted from the resolver.
    private final Set<PartitionKeyRangeIdentity> partitionAddressCache;

    // Guards updateAddressCache so concurrent READ_EOF events trigger only one eviction.
    private final AtomicBoolean updatingAddressCache = new AtomicBoolean(false);

    /**
     * Creates a listener that evicts stale partition addresses for {@code endpoint} when its
     * connection state degrades.
     *
     * @param addressResolver resolver whose cached addresses are evicted; must not be null.
     * @param endpoint the endpoint whose connection events are observed; must not be null.
     */
    public RntbdConnectionStateListener(final IAddressResolver addressResolver, final RntbdEndpoint endpoint) {
        this.addressResolver = checkNotNull(addressResolver, "expected non-null addressResolver");
        this.endpoint = checkNotNull(endpoint, "expected non-null endpoint");
        this.partitionAddressCache = ConcurrentHashMap.newKeySet();
    }

    /**
     * Records the partition key range identity of {@code request} so that its addresses can be
     * evicted later if this endpoint's connection breaks.
     *
     * @param request the request routed through this endpoint; must not be null.
     */
    public void updateConnectionState(final RxDocumentServiceRequest request) {
        // BUG FIX: the original called checkNotNull("expect non-null request"), which only
        // null-checked the message string and never validated the request itself.
        checkNotNull(request, "expect non-null request");

        PartitionKeyRangeIdentity partitionKeyRangeIdentity = this.getPartitionKeyRangeIdentity(request);
        checkNotNull(partitionKeyRangeIdentity, "expected non-null partitionKeyRangeIdentity");
        this.partitionAddressCache.add(partitionKeyRangeIdentity);

        if (logger.isDebugEnabled()) {
            logger.debug(
                "updateConnectionState({\"time\":{},\"endpoint\":{},\"partitionKeyRangeIdentity\":{}})",
                RntbdObjectMapper.toJson(Instant.now()),
                RntbdObjectMapper.toJson(endpoint),
                RntbdObjectMapper.toJson(partitionKeyRangeIdentity));
        }
    }

    // Derives the partition key range identity from the request, falling back to the resolved
    // request context when the request does not carry one directly.
    private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(final RxDocumentServiceRequest request) {
        checkNotNull(request, "expect non-null request");

        PartitionKeyRangeIdentity partitionKeyRangeIdentity = request.getPartitionKeyRangeIdentity();

        if (partitionKeyRangeIdentity == null) {
            final String partitionKeyRange = checkNotNull(
                request.requestContext.resolvedPartitionKeyRange,
                "expected non-null resolvedPartitionKeyRange").getId();
            final String collectionRid = request.requestContext.resolvedCollectionRid;
            partitionKeyRangeIdentity = collectionRid != null
                ? new PartitionKeyRangeIdentity(collectionRid, partitionKeyRange)
                : new PartitionKeyRangeIdentity(partitionKeyRange);
        }

        return partitionKeyRangeIdentity;
    }

    // Reacts to connection-level events; only READ_EOF on an open endpoint triggers eviction.
    private void onConnectionEvent(final RntbdConnectionEvent event, final RxDocumentServiceRequest request, final Throwable exception) {
        // BUG FIX: the message for the request check wrongly said "exception".
        checkNotNull(request, "expected non-null request");
        checkNotNull(exception, "expected non-null exception");

        if (event == RntbdConnectionEvent.READ_EOF) {
            if (!this.endpoint.isClosed()) {
                if (logger.isDebugEnabled()) {
                    // BUG FIX: the format string was missing the closing brace of the JSON object.
                    logger.debug("onConnectionEvent({\"event\":{},\"time\":{},\"endpoint\":{},\"cause\":{}})",
                        event,
                        RntbdObjectMapper.toJson(Instant.now()),
                        RntbdObjectMapper.toJson(this.endpoint),
                        RntbdObjectMapper.toJson(exception));
                }
                this.updateAddressCache(request);
            }
        }
    }

    // Evicts the cached addresses for all recorded partition key ranges, at most once at a time.
    private void updateAddressCache(final RxDocumentServiceRequest request) {
        // BUG FIX: the original reset updatingAddressCache in a finally block that ran even when
        // compareAndSet failed, clearing the flag out from under the thread that owned it. The
        // try/finally now only covers the path on which the flag was actually acquired.
        if (this.updatingAddressCache.compareAndSet(false, true)) {
            try {
                if (logger.isDebugEnabled()) {
                    logger.debug(
                        "updateAddressCache ({\"time\":{},\"endpoint\":{},\"partitionAddressCache\":{}})",
                        RntbdObjectMapper.toJson(Instant.now()),
                        RntbdObjectMapper.toJson(this.endpoint),
                        RntbdObjectMapper.toJson(this.partitionAddressCache));
                }
                this.addressResolver.remove(request, this.partitionAddressCache);
                this.partitionAddressCache.clear();
            } finally {
                this.updatingAddressCache.set(false);
            }
        }
    }
}
Could we add one test where we set a header and verify that it gets added to the HttpRequest?
HttpPipeline setupPipeline() {
    // Fall back to a snapshot of the global configuration when none was supplied.
    final Configuration effectiveConfiguration = (configuration == null)
        ? Configuration.getGlobalConfiguration().clone()
        : configuration;

    final List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();

    final String sdkName = properties.getOrDefault(SDK_NAME, "UnknownName");
    final String sdkVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // ClientOptions takes precedence over HttpLogOptions for the application id.
    final String applicationId = (clientOptions != null)
        ? clientOptions.getApplicationId()
        : httpLogOptions.getApplicationId();

    // Policies before the retry policy run once per logical request.
    pipelinePolicies.add(new UserAgentPolicy(applicationId, sdkName, sdkVersion, effectiveConfiguration));
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(retryPolicy);
    pipelinePolicies.add(new KeyVaultCredentialPolicy(credential));
    pipelinePolicies.addAll(this.policies);

    // Custom headers supplied through ClientOptions are applied on every request.
    if (clientOptions != null) {
        final List<HttpHeader> customHeaders = new ArrayList<>();
        clientOptions.getHeaders()
            .forEach(h -> customHeaders.add(new HttpHeader(h.getName(), h.getValue())));
        pipelinePolicies.add(new AddHeadersPolicy(new HttpHeaders(customHeaders)));
    }

    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions));

    return new HttpPipelineBuilder()
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
// Accumulator for custom headers; presumably wrapped in an HttpHeaders and handed to an
// AddHeadersPolicy by the surrounding pipeline setup — TODO confirm against the caller.
List<HttpHeader> httpHeaderList = new ArrayList<>();
HttpPipeline setupPipeline() {
    // Fall back to a snapshot of the global configuration when none was supplied.
    final Configuration effectiveConfiguration = (configuration == null)
        ? Configuration.getGlobalConfiguration().clone()
        : configuration;

    final List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();

    final String sdkName = properties.getOrDefault(SDK_NAME, "UnknownName");
    final String sdkVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // ClientOptions takes precedence over HttpLogOptions for the application id.
    final String applicationId = (clientOptions != null)
        ? clientOptions.getApplicationId()
        : httpLogOptions.getApplicationId();

    // Policies before the retry policy run once per logical request.
    pipelinePolicies.add(new UserAgentPolicy(applicationId, sdkName, sdkVersion, effectiveConfiguration));
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(retryPolicy);
    pipelinePolicies.add(new KeyVaultCredentialPolicy(credential));
    pipelinePolicies.addAll(this.policies);

    // Custom headers supplied through ClientOptions are applied on every request.
    if (clientOptions != null) {
        final List<HttpHeader> customHeaders = new ArrayList<>();
        clientOptions.getHeaders()
            .forEach(h -> customHeaders.add(new HttpHeader(h.getName(), h.getValue())));
        pipelinePolicies.add(new AddHeadersPolicy(new HttpHeaders(customHeaders)));
    }

    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions));

    return new HttpPipelineBuilder()
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
class CryptographyClientBuilder {
    final List<HttpPipelinePolicy> policies;
    final Map<String, String> properties;

    private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class);

    private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private TokenCredential credential;
    private HttpPipeline pipeline;
    private String keyId;
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    private RetryPolicy retryPolicy;
    private Configuration configuration;
    private CryptographyServiceVersion version;
    private ClientOptions clientOptions;

    /**
     * The constructor with defaults: a default {@link RetryPolicy}, default
     * {@link HttpLogOptions}, no extra policies, and SDK metadata loaded from
     * the key-vault properties file.
     */
    public CryptographyClientBuilder() {
        retryPolicy = new RetryPolicy();
        httpLogOptions = new HttpLogOptions();
        policies = new ArrayList<>();
        properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS);
    }

    /**
     * Creates a {@link CryptographyClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * <p>If a pipeline was supplied via {@link #pipeline(HttpPipeline)}, it is used together
     * with the key identifier and all other builder settings are ignored; otherwise a pipeline
     * is assembled from the configured credential.</p>
     *
     * @return A {@link CryptographyClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyClient buildClient() {
        return new CryptographyClient(buildAsyncClient());
    }

    /**
     * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * @return A {@link CryptographyAsyncClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyAsyncClient buildAsyncClient() {
        if (Strings.isNullOrEmpty(keyId)) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "JSON Web Key identifier is required to create cryptography client"));
        }

        CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest();

        // An explicitly supplied pipeline short-circuits all other settings.
        if (pipeline != null) {
            return new CryptographyAsyncClient(keyId, pipeline, serviceVersion);
        }

        if (credential == null) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "Key Vault credentials are required to build the Cryptography async client"));
        }

        // Renamed local so it no longer shadows the 'pipeline' field above.
        HttpPipeline builtPipeline = setupPipeline();
        return new CryptographyAsyncClient(keyId, builtPipeline, serviceVersion);
    }

    TokenCredential getCredential() {
        return credential;
    }

    HttpPipeline getPipeline() {
        return pipeline;
    }

    CryptographyServiceVersion getServiceVersion() {
        return version;
    }

    /**
     * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography
     * operations.
     *
     * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in the vault.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder keyIdentifier(String keyId) {
        this.keyId = keyId;
        return this;
    }

    /**
     * Sets the credential to use when authenticating HTTP requests.
     *
     * @param credential The credential to use for authenticating HTTP requests.
     * @return the updated builder object.
     * @throws NullPointerException if {@code credential} is {@code null}.
     */
    public CryptographyClientBuilder credential(TokenCredential credential) {
        Objects.requireNonNull(credential);
        this.credential = credential;
        return this;
    }

    /**
     * Sets the logging configuration for HTTP requests and responses.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP
     * requests/responses.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        httpLogOptions = logOptions;
        return this;
    }

    /**
     * Adds a policy to the set of existing policies that are executed after the client required
     * policies.
     *
     * @param policy The {@link HttpPipelinePolicy policy} to be added.
     * @return the updated builder object.
     * @throws NullPointerException if {@code policy} is {@code null}.
     */
    public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) {
        Objects.requireNonNull(policy);
        policies.add(policy);
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return the updated builder object.
     * @throws NullPointerException If {@code client} is {@code null}.
     */
    public CryptographyClientBuilder httpClient(HttpClient client) {
        Objects.requireNonNull(client);
        this.httpClient = client;
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client. If set, all other settings are
     * ignored aside from the jsonWebKey identifier.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving
     * responses.
     * @return the updated builder object.
     * @throws NullPointerException If {@code pipeline} is {@code null}.
     */
    public CryptographyClientBuilder pipeline(HttpPipeline pipeline) {
        Objects.requireNonNull(pipeline);
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client. The
     * default is a clone of the global configuration.
     *
     * @param configuration The configuration store to use.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the {@link CryptographyServiceVersion} that is used when making API requests. If not
     * provided, the latest known service version is used.
     *
     * @param version {@link CryptographyServiceVersion} of the service to be used when making
     * requests.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used when each request is sent. A default retry
     * policy is used if none is provided.
     *
     * @param retryPolicy user's retry policy applied to each request.
     * @return The updated CryptographyClientBuilder object.
     * @throws NullPointerException if the specified {@code retryPolicy} is null.
     */
    public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        // BUG FIX: the null-check message read "cannot be bull".
        Objects.requireNonNull(retryPolicy, "The retry policy cannot be null");
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets various {@link ClientOptions options} on this client, such as an application id used
     * by the {@link UserAgentPolicy} for telemetry.
     *
     * @param clientOptions the {@link ClientOptions} to be set on this client.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
}
class CryptographyClientBuilder {
    final List<HttpPipelinePolicy> policies;
    final Map<String, String> properties;

    private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class);

    private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private TokenCredential credential;
    private HttpPipeline pipeline;
    private String keyId;
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    private RetryPolicy retryPolicy;
    private Configuration configuration;
    private CryptographyServiceVersion version;
    private ClientOptions clientOptions;

    /**
     * The constructor with defaults: a default {@link RetryPolicy}, default
     * {@link HttpLogOptions}, no extra policies, and SDK metadata loaded from
     * the key-vault properties file.
     */
    public CryptographyClientBuilder() {
        retryPolicy = new RetryPolicy();
        httpLogOptions = new HttpLogOptions();
        policies = new ArrayList<>();
        properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS);
    }

    /**
     * Creates a {@link CryptographyClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * <p>If a pipeline was supplied via {@link #pipeline(HttpPipeline)}, it is used together
     * with the key identifier and all other builder settings are ignored; otherwise a pipeline
     * is assembled from the configured credential.</p>
     *
     * @return A {@link CryptographyClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyClient buildClient() {
        return new CryptographyClient(buildAsyncClient());
    }

    /**
     * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * @return A {@link CryptographyAsyncClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyAsyncClient buildAsyncClient() {
        if (Strings.isNullOrEmpty(keyId)) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "JSON Web Key identifier is required to create cryptography client"));
        }

        CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest();

        // An explicitly supplied pipeline short-circuits all other settings.
        if (pipeline != null) {
            return new CryptographyAsyncClient(keyId, pipeline, serviceVersion);
        }

        if (credential == null) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "Key Vault credentials are required to build the Cryptography async client"));
        }

        // Renamed local so it no longer shadows the 'pipeline' field above.
        HttpPipeline builtPipeline = setupPipeline();
        return new CryptographyAsyncClient(keyId, builtPipeline, serviceVersion);
    }

    TokenCredential getCredential() {
        return credential;
    }

    HttpPipeline getPipeline() {
        return pipeline;
    }

    CryptographyServiceVersion getServiceVersion() {
        return version;
    }

    /**
     * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography
     * operations.
     *
     * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in the vault.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder keyIdentifier(String keyId) {
        this.keyId = keyId;
        return this;
    }

    /**
     * Sets the credential to use when authenticating HTTP requests.
     *
     * @param credential The credential to use for authenticating HTTP requests.
     * @return the updated builder object.
     * @throws NullPointerException if {@code credential} is {@code null}.
     */
    public CryptographyClientBuilder credential(TokenCredential credential) {
        Objects.requireNonNull(credential);
        this.credential = credential;
        return this;
    }

    /**
     * Sets the logging configuration for HTTP requests and responses.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP
     * requests/responses.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        httpLogOptions = logOptions;
        return this;
    }

    /**
     * Adds a policy to the set of existing policies that are executed after the client required
     * policies.
     *
     * @param policy The {@link HttpPipelinePolicy policy} to be added.
     * @return the updated builder object.
     * @throws NullPointerException if {@code policy} is {@code null}.
     */
    public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) {
        Objects.requireNonNull(policy);
        policies.add(policy);
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return the updated builder object.
     * @throws NullPointerException If {@code client} is {@code null}.
     */
    public CryptographyClientBuilder httpClient(HttpClient client) {
        Objects.requireNonNull(client);
        this.httpClient = client;
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client. If set, all other settings are
     * ignored aside from the jsonWebKey identifier.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving
     * responses.
     * @return the updated builder object.
     * @throws NullPointerException If {@code pipeline} is {@code null}.
     */
    public CryptographyClientBuilder pipeline(HttpPipeline pipeline) {
        Objects.requireNonNull(pipeline);
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client. The
     * default is a clone of the global configuration.
     *
     * @param configuration The configuration store to use.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the {@link CryptographyServiceVersion} that is used when making API requests. If not
     * provided, the latest known service version is used.
     *
     * @param version {@link CryptographyServiceVersion} of the service to be used when making
     * requests.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used when each request is sent. A default retry
     * policy is used if none is provided.
     *
     * @param retryPolicy user's retry policy applied to each request.
     * @return The updated CryptographyClientBuilder object.
     * @throws NullPointerException if the specified {@code retryPolicy} is null.
     */
    public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        // BUG FIX: the null-check message read "cannot be bull".
        Objects.requireNonNull(retryPolicy, "The retry policy cannot be null");
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets the {@link ClientOptions} which enables various options to be set on the client, such
     * as an application id used by the {@link UserAgentPolicy} for telemetry/monitoring.
     *
     * @param clientOptions the {@link ClientOptions} to be set on the client.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
}
I think we should be able to; let me see what I can do in the given time.
HttpPipeline setupPipeline() {
    // Fall back to a snapshot of the global configuration when none was supplied.
    final Configuration effectiveConfiguration = (configuration == null)
        ? Configuration.getGlobalConfiguration().clone()
        : configuration;

    final List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();

    final String sdkName = properties.getOrDefault(SDK_NAME, "UnknownName");
    final String sdkVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // ClientOptions takes precedence over HttpLogOptions for the application id.
    final String applicationId = (clientOptions != null)
        ? clientOptions.getApplicationId()
        : httpLogOptions.getApplicationId();

    // Policies before the retry policy run once per logical request.
    pipelinePolicies.add(new UserAgentPolicy(applicationId, sdkName, sdkVersion, effectiveConfiguration));
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(retryPolicy);
    pipelinePolicies.add(new KeyVaultCredentialPolicy(credential));
    pipelinePolicies.addAll(this.policies);

    // Custom headers supplied through ClientOptions are applied on every request.
    if (clientOptions != null) {
        final List<HttpHeader> customHeaders = new ArrayList<>();
        clientOptions.getHeaders()
            .forEach(h -> customHeaders.add(new HttpHeader(h.getName(), h.getValue())));
        pipelinePolicies.add(new AddHeadersPolicy(new HttpHeaders(customHeaders)));
    }

    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions));

    return new HttpPipelineBuilder()
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
// Accumulator for custom headers; presumably wrapped in an HttpHeaders and handed to an
// AddHeadersPolicy by the surrounding pipeline setup — TODO confirm against the caller.
List<HttpHeader> httpHeaderList = new ArrayList<>();
HttpPipeline setupPipeline() {
    // Fall back to a snapshot of the global configuration when none was supplied.
    final Configuration effectiveConfiguration = (configuration == null)
        ? Configuration.getGlobalConfiguration().clone()
        : configuration;

    final List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();

    final String sdkName = properties.getOrDefault(SDK_NAME, "UnknownName");
    final String sdkVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    // ClientOptions takes precedence over HttpLogOptions for the application id.
    final String applicationId = (clientOptions != null)
        ? clientOptions.getApplicationId()
        : httpLogOptions.getApplicationId();

    // Policies before the retry policy run once per logical request.
    pipelinePolicies.add(new UserAgentPolicy(applicationId, sdkName, sdkVersion, effectiveConfiguration));
    HttpPolicyProviders.addBeforeRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(retryPolicy);
    pipelinePolicies.add(new KeyVaultCredentialPolicy(credential));
    pipelinePolicies.addAll(this.policies);

    // Custom headers supplied through ClientOptions are applied on every request.
    if (clientOptions != null) {
        final List<HttpHeader> customHeaders = new ArrayList<>();
        clientOptions.getHeaders()
            .forEach(h -> customHeaders.add(new HttpHeader(h.getName(), h.getValue())));
        pipelinePolicies.add(new AddHeadersPolicy(new HttpHeaders(customHeaders)));
    }

    HttpPolicyProviders.addAfterRetryPolicies(pipelinePolicies);
    pipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions));

    return new HttpPipelineBuilder()
        .policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
class CryptographyClientBuilder {
    final List<HttpPipelinePolicy> policies;
    final Map<String, String> properties;

    private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class);

    private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties";
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";

    private TokenCredential credential;
    private HttpPipeline pipeline;
    private String keyId;
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    private RetryPolicy retryPolicy;
    private Configuration configuration;
    private CryptographyServiceVersion version;
    private ClientOptions clientOptions;

    /**
     * The constructor with defaults: a default {@link RetryPolicy}, default
     * {@link HttpLogOptions}, no extra policies, and SDK metadata loaded from
     * the key-vault properties file.
     */
    public CryptographyClientBuilder() {
        retryPolicy = new RetryPolicy();
        httpLogOptions = new HttpLogOptions();
        policies = new ArrayList<>();
        properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS);
    }

    /**
     * Creates a {@link CryptographyClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * <p>If a pipeline was supplied via {@link #pipeline(HttpPipeline)}, it is used together
     * with the key identifier and all other builder settings are ignored; otherwise a pipeline
     * is assembled from the configured credential.</p>
     *
     * @return A {@link CryptographyClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyClient buildClient() {
        return new CryptographyClient(buildAsyncClient());
    }

    /**
     * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every call
     * creates a new instance.
     *
     * @return A {@link CryptographyAsyncClient} with the options set from the builder.
     * @throws IllegalStateException If the key identifier is not set, or neither a pipeline nor
     * a credential was configured.
     */
    public CryptographyAsyncClient buildAsyncClient() {
        if (Strings.isNullOrEmpty(keyId)) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "JSON Web Key identifier is required to create cryptography client"));
        }

        CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest();

        // An explicitly supplied pipeline short-circuits all other settings.
        if (pipeline != null) {
            return new CryptographyAsyncClient(keyId, pipeline, serviceVersion);
        }

        if (credential == null) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "Key Vault credentials are required to build the Cryptography async client"));
        }

        // Renamed local so it no longer shadows the 'pipeline' field above.
        HttpPipeline builtPipeline = setupPipeline();
        return new CryptographyAsyncClient(keyId, builtPipeline, serviceVersion);
    }

    TokenCredential getCredential() {
        return credential;
    }

    HttpPipeline getPipeline() {
        return pipeline;
    }

    CryptographyServiceVersion getServiceVersion() {
        return version;
    }

    /**
     * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography
     * operations.
     *
     * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in the vault.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder keyIdentifier(String keyId) {
        this.keyId = keyId;
        return this;
    }

    /**
     * Sets the credential to use when authenticating HTTP requests.
     *
     * @param credential The credential to use for authenticating HTTP requests.
     * @return the updated builder object.
     * @throws NullPointerException if {@code credential} is {@code null}.
     */
    public CryptographyClientBuilder credential(TokenCredential credential) {
        Objects.requireNonNull(credential);
        this.credential = credential;
        return this;
    }

    /**
     * Sets the logging configuration for HTTP requests and responses.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP
     * requests/responses.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        httpLogOptions = logOptions;
        return this;
    }

    /**
     * Adds a policy to the set of existing policies that are executed after the client required
     * policies.
     *
     * @param policy The {@link HttpPipelinePolicy policy} to be added.
     * @return the updated builder object.
     * @throws NullPointerException if {@code policy} is {@code null}.
     */
    public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) {
        Objects.requireNonNull(policy);
        policies.add(policy);
        return this;
    }

    /**
     * Sets the HTTP client to use for sending and receiving requests to and from the service.
     *
     * @param client The HTTP client to use for requests.
     * @return the updated builder object.
     * @throws NullPointerException If {@code client} is {@code null}.
     */
    public CryptographyClientBuilder httpClient(HttpClient client) {
        Objects.requireNonNull(client);
        this.httpClient = client;
        return this;
    }

    /**
     * Sets the HTTP pipeline to use for the service client. If set, all other settings are
     * ignored aside from the jsonWebKey identifier.
     *
     * @param pipeline The HTTP pipeline to use for sending service requests and receiving
     * responses.
     * @return the updated builder object.
     * @throws NullPointerException If {@code pipeline} is {@code null}.
     */
    public CryptographyClientBuilder pipeline(HttpPipeline pipeline) {
        Objects.requireNonNull(pipeline);
        this.pipeline = pipeline;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client. The
     * default is a clone of the global configuration.
     *
     * @param configuration The configuration store to use.
     * @return the updated builder object.
     */
    public CryptographyClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the {@link CryptographyServiceVersion} that is used when making API requests. If not
     * provided, the latest known service version is used.
     *
     * @param version {@link CryptographyServiceVersion} of the service to be used when making
     * requests.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link RetryPolicy} that is used when each request is sent. A default retry
     * policy is used if none is provided.
     *
     * @param retryPolicy user's retry policy applied to each request.
     * @return The updated CryptographyClientBuilder object.
     * @throws NullPointerException if the specified {@code retryPolicy} is null.
     */
    public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        // BUG FIX: the null-check message read "cannot be bull".
        Objects.requireNonNull(retryPolicy, "The retry policy cannot be null");
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Sets various {@link ClientOptions options} on this client, such as an application id used
     * by the {@link UserAgentPolicy} for telemetry.
     *
     * @param clientOptions the {@link ClientOptions} to be set on this client.
     * @return The updated CryptographyClientBuilder object.
     */
    public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
}
class CryptographyClientBuilder { final List<HttpPipelinePolicy> policies; final Map<String, String> properties; private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class); private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private TokenCredential credential; private HttpPipeline pipeline; private String keyId; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private CryptographyServiceVersion version; private ClientOptions clientOptions; /** * The constructor with defaults. */ public CryptographyClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS); } /** * Creates a {@link CryptographyClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link CryptographyClient} is created. * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * are used to create the {@link CryptographyClient client}. All other builder settings are ignored. If * {@code pipeline} is not set, then * ({@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * {@link CryptographyClient client}.</p> * * @return A {@link CryptographyClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * either of ({@link CryptographyClientBuilder */ public CryptographyClient buildClient() { return new CryptographyClient(buildAsyncClient()); } /** * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every time * {@link * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * CryptographyAsyncClient async client}. 
All other builder settings are ignored. If {@code pipeline} is not set, * then ({@link CryptographyClientBuilder * CryptographyClientBuilder * CryptographyAsyncClient async client}.</p> * * @return A {@link CryptographyAsyncClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * CryptographyClientBuilder */ public CryptographyAsyncClient buildAsyncClient() { if (Strings.isNullOrEmpty(keyId)) { throw logger.logExceptionAsError(new IllegalStateException( "JSON Web Key identifier is required to create cryptography client")); } CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest(); if (pipeline != null) { return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError(new IllegalStateException( "Key Vault credentials are required to build the Cryptography async client")); } HttpPipeline pipeline = setupPipeline(); return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } TokenCredential getCredential() { return credential; } HttpPipeline getPipeline() { return pipeline; } CryptographyServiceVersion getServiceVersion() { return version; } /** * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography operations. * * <p>If {@code jsonWebKey} is provided then that takes precedence over key identifier and gets used for * cryptography operations.</p> * * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in jsonWebKey vault. * @return the updated builder object. */ public CryptographyClientBuilder keyIdentifier(String keyId) { this.keyId = keyId; return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return the updated builder object. * @throws NullPointerException if {@code credential} is {@code null}. 
*/ public CryptographyClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated builder object. */ public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after the client required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated builder object. * @throws NullPointerException if {@code policy} is {@code null}. */ public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated builder object. * @throws NullPointerException If {@code client} is {@code null}. */ public CryptographyClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from jsonWebKey identifier * or jsonWebKey to build the clients. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated builder object. */ public CryptographyClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. 
* * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return the updated builder object. */ public CryptographyClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link CryptographyServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link CryptographyServiceVersion} of the service to be used when making requests. * @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) { this.version = version; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * The default retry policy will be used in the pipeline, if not provided. * * @param retryPolicy user's retry policy applied to each request. * @return The updated CryptographyClientBuilder object. * @throws NullPointerException if the specified {@code retryPolicy} is null. */ public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. 
* @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } }
Will add these tests in a future iteration. Discussed it with @hemanttanwar in a side conversation.
HttpPipeline setupPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); }
List<HttpHeader> httpHeaderList = new ArrayList<>();
HttpPipeline setupPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(this.policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); }
class CryptographyClientBuilder { final List<HttpPipelinePolicy> policies; final Map<String, String> properties; private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class); private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private TokenCredential credential; private HttpPipeline pipeline; private String keyId; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private CryptographyServiceVersion version; private ClientOptions clientOptions; /** * The constructor with defaults. */ public CryptographyClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS); } /** * Creates a {@link CryptographyClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link CryptographyClient} is created. * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * are used to create the {@link CryptographyClient client}. All other builder settings are ignored. If * {@code pipeline} is not set, then * ({@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * {@link CryptographyClient client}.</p> * * @return A {@link CryptographyClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * either of ({@link CryptographyClientBuilder */ public CryptographyClient buildClient() { return new CryptographyClient(buildAsyncClient()); } /** * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every time * {@link * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * CryptographyAsyncClient async client}. 
All other builder settings are ignored. If {@code pipeline} is not set, * then ({@link CryptographyClientBuilder * CryptographyClientBuilder * CryptographyAsyncClient async client}.</p> * * @return A {@link CryptographyAsyncClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * CryptographyClientBuilder */ public CryptographyAsyncClient buildAsyncClient() { if (Strings.isNullOrEmpty(keyId)) { throw logger.logExceptionAsError(new IllegalStateException( "JSON Web Key identifier is required to create cryptography client")); } CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest(); if (pipeline != null) { return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError(new IllegalStateException( "Key Vault credentials are required to build the Cryptography async client")); } HttpPipeline pipeline = setupPipeline(); return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } TokenCredential getCredential() { return credential; } HttpPipeline getPipeline() { return pipeline; } CryptographyServiceVersion getServiceVersion() { return version; } /** * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography operations. * * <p>If {@code jsonWebKey} is provided then that takes precedence over key identifier and gets used for * cryptography operations.</p> * * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in jsonWebKey vault. * @return the updated builder object. */ public CryptographyClientBuilder keyIdentifier(String keyId) { this.keyId = keyId; return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return the updated builder object. * @throws NullPointerException if {@code credential} is {@code null}. 
*/ public CryptographyClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated builder object. */ public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after the client required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated builder object. * @throws NullPointerException if {@code policy} is {@code null}. */ public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated builder object. * @throws NullPointerException If {@code client} is {@code null}. */ public CryptographyClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from jsonWebKey identifier * or jsonWebKey to build the clients. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated builder object. */ public CryptographyClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. 
* * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return the updated builder object. */ public CryptographyClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link CryptographyServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link CryptographyServiceVersion} of the service to be used when making requests. * @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) { this.version = version; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * The default retry policy will be used in the pipeline, if not provided. * * @param retryPolicy user's retry policy applied to each request. * @return The updated CryptographyClientBuilder object. * @throws NullPointerException if the specified {@code retryPolicy} is null. */ public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets various {@link ClientOptions options} on this client. * * @param clientOptions the {@link ClientOptions} to be set on this client. * @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } }
class CryptographyClientBuilder { final List<HttpPipelinePolicy> policies; final Map<String, String> properties; private final ClientLogger logger = new ClientLogger(CryptographyClientBuilder.class); private static final String AZURE_KEY_VAULT_KEYS = "azure-key-vault-keys.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private TokenCredential credential; private HttpPipeline pipeline; private String keyId; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private Configuration configuration; private CryptographyServiceVersion version; private ClientOptions clientOptions; /** * The constructor with defaults. */ public CryptographyClientBuilder() { retryPolicy = new RetryPolicy(); httpLogOptions = new HttpLogOptions(); policies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_KEYS); } /** * Creates a {@link CryptographyClient} based on options set in the builder. * Every time {@code buildClient()} is called, a new instance of {@link CryptographyClient} is created. * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * are used to create the {@link CryptographyClient client}. All other builder settings are ignored. If * {@code pipeline} is not set, then * ({@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * {@link CryptographyClient client}.</p> * * @return A {@link CryptographyClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * either of ({@link CryptographyClientBuilder */ public CryptographyClient buildClient() { return new CryptographyClient(buildAsyncClient()); } /** * Creates a {@link CryptographyAsyncClient} based on options set in the builder. Every time * {@link * * <p>If {@link CryptographyClientBuilder * ({@link CryptographyClientBuilder * CryptographyAsyncClient async client}. 
All other builder settings are ignored. If {@code pipeline} is not set, * then ({@link CryptographyClientBuilder * CryptographyClientBuilder * CryptographyAsyncClient async client}.</p> * * @return A {@link CryptographyAsyncClient} with the options set from the builder. * @throws IllegalStateException If {@link CryptographyClientBuilder * CryptographyClientBuilder */ public CryptographyAsyncClient buildAsyncClient() { if (Strings.isNullOrEmpty(keyId)) { throw logger.logExceptionAsError(new IllegalStateException( "JSON Web Key identifier is required to create cryptography client")); } CryptographyServiceVersion serviceVersion = version != null ? version : CryptographyServiceVersion.getLatest(); if (pipeline != null) { return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } if (credential == null) { throw logger.logExceptionAsError(new IllegalStateException( "Key Vault credentials are required to build the Cryptography async client")); } HttpPipeline pipeline = setupPipeline(); return new CryptographyAsyncClient(keyId, pipeline, serviceVersion); } TokenCredential getCredential() { return credential; } HttpPipeline getPipeline() { return pipeline; } CryptographyServiceVersion getServiceVersion() { return version; } /** * Sets the identifier of the jsonWebKey from Azure Key Vault to be used for cryptography operations. * * <p>If {@code jsonWebKey} is provided then that takes precedence over key identifier and gets used for * cryptography operations.</p> * * @param keyId The jsonWebKey identifier representing the jsonWebKey stored in jsonWebKey vault. * @return the updated builder object. */ public CryptographyClientBuilder keyIdentifier(String keyId) { this.keyId = keyId; return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * @return the updated builder object. * @throws NullPointerException if {@code credential} is {@code null}. 
*/ public CryptographyClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated builder object. */ public CryptographyClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after the client required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * @return the updated builder object. * @throws NullPointerException if {@code policy} is {@code null}. */ public CryptographyClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return the updated builder object. * @throws NullPointerException If {@code client} is {@code null}. */ public CryptographyClientBuilder httpClient(HttpClient client) { Objects.requireNonNull(client); this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from jsonWebKey identifier * or jsonWebKey to build the clients. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return the updated builder object. */ public CryptographyClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. 
* * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return the updated builder object. */ public CryptographyClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link CryptographyServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link CryptographyServiceVersion} of the service to be used when making requests. * @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder serviceVersion(CryptographyServiceVersion version) { this.version = version; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * The default retry policy will be used in the pipeline, if not provided. * * @param retryPolicy user's retry policy applied to each request. * @return The updated CryptographyClientBuilder object. * @throws NullPointerException if the specified {@code retryPolicy} is null. */ public CryptographyClientBuilder retryPolicy(RetryPolicy retryPolicy) { Objects.requireNonNull(retryPolicy, "The retry policy cannot be bull"); this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. 
* @return The updated CryptographyClientBuilder object. */ public CryptographyClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } }
Duration.toSeconds() is a java9 + api. doesn't exist on java8 https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/Duration.html#toSeconds() ```suggestion long remainingSeconds = this.waitTimeInSeconds - (this.getElapsedTimeSupplier.get().toMillis() / 1000); ```
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow; Duration backoffTime = Duration.ofSeconds(0); Duration timeout; boolean forceRefreshAddressCache; if (!isRetryableException(exception)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof GoneException && !request.isReadOnly() && BridgeInternal.hasSendingRequestStarted((CosmosException)exception)) { logger.warn( "Operation will NOT be retried. Write operations can not be retried safely when sending the request " + "to the service because they aren't idempotent. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry( Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount))); } long remainingSeconds = this.waitTimeInSeconds - this.getElapsedTimeSupplier.get().toSeconds(); int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneRetryPolicy.BACK_OFF_MULTIPLIER; logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? 
Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception); Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft(); if (result != null) { return result; } forceRefreshAddressCache = exceptionHandlingResult.getRight(); return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); }
long remainingSeconds = this.waitTimeInSeconds - this.getElapsedTimeSupplier.get().toSeconds();
public Mono<ShouldRetryResult> shouldRetry(Exception exception) { return this.retryWithRetryPolicy.shouldRetry(exception) .flatMap((retryWithResult) -> { if (retryWithResult.shouldRetry) { return Mono.just(retryWithResult); } return this.goneRetryPolicy.shouldRetry(exception) .flatMap((goneRetryResult) -> { if (!goneRetryResult.shouldRetry) { logger.debug("Operation will NOT be retried. Exception:", exception); this.end = Instant.now(); } return Mono.just(goneRetryResult); }); }); }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics implements LastRetryWithExceptionHolder, LastRetryWithExceptionProvider { private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final GoneRetryPolicy goneRetryPolicy; private final RetryWithRetryPolicy retryWithRetryPolicy; private final Instant start; private volatile Instant end; private RetryWithException lastRetryWithException; public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.goneRetryPolicy = new GoneRetryPolicy( request, waitTimeInSeconds, this::getElapsedTime, this); this.retryWithRetryPolicy = new RetryWithRetryPolicy( waitTimeInSeconds, this::getElapsedTime, this); this.start = Instant.now(); } @Override public void setLastRetryWithException(RetryWithException lastRetryWithException) { this.lastRetryWithException = lastRetryWithException; } @Override public RetryWithException getLastRetryWithException() { return this.lastRetryWithException; } @Override private Duration getElapsedTime() { Instant endSnapshot = this.end != null ? 
this.end : Instant.now(); return Duration.between(this.start, endSnapshot); } static class GoneRetryPolicy extends RetryPolicyWithDiagnostics { private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneRetryPolicy.INITIAL_BACKOFF_TIME; private final Supplier<Duration> getElapsedTimeSupplier; private final int waitTimeInSeconds; private final LastRetryWithExceptionProvider lastRetryWithExceptionProvider; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public GoneRetryPolicy( RxDocumentServiceRequest request, Integer waitTimeInSeconds, Supplier<Duration> getElapsedTimeSupplier, LastRetryWithExceptionProvider lastRetryWithExceptionProvider) { checkNotNull(request, "request must not be null."); this.request = request; this.getElapsedTimeSupplier = getElapsedTimeSupplier; this.waitTimeInSeconds = waitTimeInSeconds != null ? 
waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS; this.lastRetryWithExceptionProvider = lastRetryWithExceptionProvider; } private boolean isRetryableException(Exception exception) { if (exception instanceof GoneException || exception instanceof RetryWithException || exception instanceof PartitionIsMigratingException || exception instanceof PartitionKeyRangeIsSplittingException) { return true; } if (exception instanceof InvalidPartitionException) { return this.request.getPartitionKeyRangeIdentity() == null || this.request.getPartitionKeyRangeIdentity().getCollectionRid() == null; } return false; } private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) { String exceptionType; if (exception instanceof GoneException) { exceptionType = "GoneException"; } else if (exception instanceof PartitionKeyRangeGoneException) { exceptionType = "PartitionKeyRangeGoneException"; } else if (exception instanceof InvalidPartitionException) { exceptionType = "InvalidPartitionException"; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { exceptionType = "PartitionKeyRangeIsSplittingException"; } else if (exception instanceof CosmosException) { logger.warn("Received CosmosException after backoff/retry. Will fail the request.", exception); return (CosmosException)exception; } else { throw new IllegalStateException("Invalid exception type", exception); } RetryWithException lastRetryWithExceptionSnapshot = lastRetryWithExceptionProvider.getLastRetryWithException(); if (lastRetryWithExceptionSnapshot != null) { logger.warn( "Received {} after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}", exceptionType, exceptionType, exception, lastRetryWithExceptionSnapshot); return lastRetryWithExceptionSnapshot; } logger.warn( "Received {} after backoff/retry. Will fail the request. 
{}", exceptionType, exception); return BridgeInternal.createServiceUnavailableException(exception); } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow; Duration backoffTime = Duration.ofSeconds(0); Duration timeout; boolean forceRefreshAddressCache; if (!isRetryableException(exception)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof GoneException && !request.isReadOnly() && BridgeInternal.hasSendingRequestStarted((CosmosException)exception)) { logger.warn( "Operation will NOT be retried. Write operations can not be retried safely when sending the request " + "to the service because they aren't idempotent. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry( Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount))); } long remainingSeconds = this.waitTimeInSeconds - this.getElapsedTimeSupplier.get().toSeconds(); int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneRetryPolicy.BACK_OFF_MULTIPLIER; logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? 
Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception); Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft(); if (result != null) { return result; } forceRefreshAddressCache = exceptionHandlingResult.getRight(); return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); } private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) { if (exception instanceof GoneException) { return handleGoneException((GoneException)exception); } else if (exception instanceof PartitionIsMigratingException) { return handlePartitionIsMigratingException((PartitionIsMigratingException)exception); } else if (exception instanceof InvalidPartitionException) { return handleInvalidPartitionException((InvalidPartitionException)exception); } else if (exception instanceof PartitionKeyRangeIsSplittingException) { return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception); } throw new IllegalStateException("Invalid exception type", exception); } private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) { logger.info("Received gone exception, will retry, {}", exception.toString()); return Pair.of(null, true); } private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) { logger.info("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; return Pair.of(null, true); } private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; 
this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; return Pair.of(null, false); } private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}", exception.toString()); return Pair.of( Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception))), false); } logger.info("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; return Pair.of(null, false); } } static class RetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 15000; private final static int INITIAL_BACKOFF_TIME_MS = 10; private final static int BACK_OFF_MULTIPLIER = 2; private volatile int attemptCount = 1; private volatile int currentBackoffMilliseconds = RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS; private final int waitTimeInSeconds; private final Supplier<Duration> getElapsedTimeSupplier; private final LastRetryWithExceptionHolder lastRetryWithExceptionHolder; public final static Quadruple<Boolean, Boolean, Duration, Integer> INITIAL_ARGUMENT_VALUE_POLICY_ARG = Quadruple.with(false, false, Duration.ofSeconds(60), 0); public RetryWithRetryPolicy(Integer waitTimeInSeconds, Supplier<Duration> getElapsedTimeSupplier, LastRetryWithExceptionHolder lastRetryWithExceptionHolder) { 
this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS; this.getElapsedTimeSupplier = getElapsedTimeSupplier; this.lastRetryWithExceptionHolder = lastRetryWithExceptionHolder; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { Duration backoffTime; Duration timeout; if (!(exception instanceof RetryWithException)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry()); } RetryWithException lastRetryWithException = (RetryWithException)exception; this.lastRetryWithExceptionHolder.setLastRetryWithException(lastRetryWithException); long remainingMilliseconds = (this.waitTimeInSeconds * 1_000L) - this.getElapsedTimeSupplier.get().toMillis(); int currentRetryAttemptCount = this.attemptCount++; if (remainingMilliseconds <= 0) { logger.warn("Received RetryWithException after backoff/retry. Will fail the request.", lastRetryWithException); return Mono.just(ShouldRetryResult.error(lastRetryWithException)); } backoffTime = Duration.ofMillis( Math.min( Math.min(this.currentBackoffMilliseconds, remainingMilliseconds), RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS)); this.currentBackoffMilliseconds *= RetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.debug("BackoffTime: {} ms.", backoffTime.toMillis()); long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec) : Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS); logger.info("Received RetryWithException, will retry, ", exception); return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(false, true, timeout, currentRetryAttemptCount))); } } }
class GoneAndRetryWithRetryPolicy extends RetryPolicyWithDiagnostics{ private final static Logger logger = LoggerFactory.getLogger(GoneAndRetryWithRetryPolicy.class); private final GoneRetryPolicy goneRetryPolicy; private final RetryWithRetryPolicy retryWithRetryPolicy; private final Instant start; private volatile Instant end; private volatile RetryWithException lastRetryWithException; public GoneAndRetryWithRetryPolicy(RxDocumentServiceRequest request, Integer waitTimeInSeconds) { this.goneRetryPolicy = new GoneRetryPolicy( request, waitTimeInSeconds); this.retryWithRetryPolicy = new RetryWithRetryPolicy( waitTimeInSeconds); this.start = Instant.now(); } @Override private Duration getElapsedTime() { Instant endSnapshot = this.end != null ? this.end : Instant.now(); return Duration.between(this.start, endSnapshot); } class GoneRetryPolicy extends RetryPolicyWithDiagnostics { private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_SECONDS = 15; private final static int INITIAL_BACKOFF_TIME = 1; private final static int BACK_OFF_MULTIPLIER = 2; private final RxDocumentServiceRequest request; private volatile int attemptCount = 1; private volatile int attemptCountInvalidPartition = 1; private volatile int currentBackoffSeconds = GoneRetryPolicy.INITIAL_BACKOFF_TIME; private final int waitTimeInSeconds; public GoneRetryPolicy( RxDocumentServiceRequest request, Integer waitTimeInSeconds) { checkNotNull(request, "request must not be null."); this.request = request; this.waitTimeInSeconds = waitTimeInSeconds != null ? 
waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS; } private boolean isNonRetryableException(Exception exception) { if (exception instanceof GoneException || exception instanceof RetryWithException || exception instanceof PartitionIsMigratingException || exception instanceof PartitionKeyRangeIsSplittingException) { return false; } if (exception instanceof InvalidPartitionException) { return this.request.getPartitionKeyRangeIdentity() != null && this.request.getPartitionKeyRangeIdentity().getCollectionRid() != null; } return true; } private CosmosException logAndWrapExceptionWithLastRetryWithException(Exception exception) { String exceptionType; if (exception instanceof GoneException) { exceptionType = "GoneException"; } else if (exception instanceof PartitionKeyRangeGoneException) { exceptionType = "PartitionKeyRangeGoneException"; } else if (exception instanceof InvalidPartitionException) { exceptionType = "InvalidPartitionException"; } else if (exception instanceof PartitionKeyRangeIsSplittingException) { exceptionType = "PartitionKeyRangeIsSplittingException"; } else if (exception instanceof CosmosException) { logger.warn("Received CosmosException after backoff/retry. Will fail the request.", exception); return (CosmosException)exception; } else { throw new IllegalStateException("Invalid exception type", exception); } RetryWithException lastRetryWithExceptionSnapshot = GoneAndRetryWithRetryPolicy.this.lastRetryWithException; if (lastRetryWithExceptionSnapshot != null) { logger.warn( "Received {} after backoff/retry including at least one RetryWithException. " + "Will fail the request with RetryWithException. {}: {}. RetryWithException: {}", exceptionType, exceptionType, exception, lastRetryWithExceptionSnapshot); return lastRetryWithExceptionSnapshot; } logger.warn( "Received {} after backoff/retry. Will fail the request. 
{}", exceptionType, exception); return BridgeInternal.createServiceUnavailableException(exception); } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { CosmosException exceptionToThrow; Duration backoffTime = Duration.ofSeconds(0); Duration timeout; boolean forceRefreshAddressCache; if (isNonRetryableException(exception)) { logger.debug("Operation will NOT be retried. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry()); } else if (exception instanceof GoneException && !request.isReadOnly() && BridgeInternal.hasSendingRequestStarted((CosmosException)exception)) { logger.warn( "Operation will NOT be retried. Write operations can not be retried safely when sending the request " + "to the service because they aren't idempotent. Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry( Quadruple.with(true, true, Duration.ofMillis(0), this.attemptCount))); } long remainingSeconds = this.waitTimeInSeconds - GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis() / 1_000L; int currentRetryAttemptCount = this.attemptCount; if (this.attemptCount++ > 1) { if (remainingSeconds <= 0) { exceptionToThrow = logAndWrapExceptionWithLastRetryWithException(exception); return Mono.just(ShouldRetryResult.error(exceptionToThrow)); } backoffTime = Duration.ofSeconds(Math.min(Math.min(this.currentBackoffSeconds, remainingSeconds), GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS)); this.currentBackoffSeconds *= GoneRetryPolicy.BACK_OFF_MULTIPLIER; logger.debug("BackoffTime: {} seconds.", backoffTime.getSeconds()); } long timeoutInMillSec = remainingSeconds*1000 - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? 
Duration.ofMillis(timeoutInMillSec) : Duration.ofSeconds(GoneRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_SECONDS); Pair<Mono<ShouldRetryResult>, Boolean> exceptionHandlingResult = handleException(exception); Mono<ShouldRetryResult> result = exceptionHandlingResult.getLeft(); if (result != null) { return result; } forceRefreshAddressCache = exceptionHandlingResult.getRight(); return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(forceRefreshAddressCache, true, timeout, currentRetryAttemptCount))); } private Pair<Mono<ShouldRetryResult>, Boolean> handleException(Exception exception) { if (exception instanceof GoneException) { return handleGoneException((GoneException)exception); } else if (exception instanceof PartitionIsMigratingException) { return handlePartitionIsMigratingException((PartitionIsMigratingException)exception); } else if (exception instanceof InvalidPartitionException) { return handleInvalidPartitionException((InvalidPartitionException)exception); } else if (exception instanceof PartitionKeyRangeIsSplittingException) { return handlePartitionKeyIsSplittingException((PartitionKeyRangeIsSplittingException) exception); } throw new IllegalStateException("Invalid exception type", exception); } private Pair<Mono<ShouldRetryResult>, Boolean> handleGoneException(GoneException exception) { logger.info("Received gone exception, will retry, {}", exception.toString()); return Pair.of(null, true); } private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionIsMigratingException(PartitionIsMigratingException exception) { logger.info("Received PartitionIsMigratingException, will retry, {}", exception.toString()); this.request.forceCollectionRoutingMapRefresh = true; return Pair.of(null, true); } private Pair<Mono<ShouldRetryResult>, Boolean> handlePartitionKeyIsSplittingException(PartitionKeyRangeIsSplittingException exception) { this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedLSN = -1; 
this.request.requestContext.quorumSelectedStoreResponse = null; logger.info("Received partition key range splitting exception, will retry, {}", exception.toString()); this.request.forcePartitionKeyRangeRefresh = true; return Pair.of(null, false); } private Pair<Mono<ShouldRetryResult>, Boolean> handleInvalidPartitionException(InvalidPartitionException exception) { this.request.requestContext.quorumSelectedLSN = -1; this.request.requestContext.resolvedPartitionKeyRange = null; this.request.requestContext.quorumSelectedStoreResponse = null; this.request.requestContext.globalCommittedSelectedLSN = -1; if (this.attemptCountInvalidPartition++ > 2) { logger.warn("Received second InvalidPartitionException after backoff/retry. Will fail the request. {}", exception.toString()); return Pair.of( Mono.just(ShouldRetryResult.error(BridgeInternal.createServiceUnavailableException(exception))), false); } logger.info("Received invalid collection exception, will retry, {}", exception.toString()); this.request.forceNameCacheRefresh = true; return Pair.of(null, false); } } class RetryWithRetryPolicy extends RetryPolicyWithDiagnostics { private final static int DEFAULT_WAIT_TIME_IN_SECONDS = 30; private final static int MAXIMUM_BACKOFF_TIME_IN_MS = 15000; private final static int INITIAL_BACKOFF_TIME_MS = 10; private final static int BACK_OFF_MULTIPLIER = 2; private volatile int attemptCount = 1; private volatile int currentBackoffMilliseconds = RetryWithRetryPolicy.INITIAL_BACKOFF_TIME_MS; private final int waitTimeInSeconds; public RetryWithRetryPolicy(Integer waitTimeInSeconds) { this.waitTimeInSeconds = waitTimeInSeconds != null ? waitTimeInSeconds : DEFAULT_WAIT_TIME_IN_SECONDS; } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { Duration backoffTime; Duration timeout; if (!(exception instanceof RetryWithException)) { logger.debug("Operation will NOT be retried. 
Current attempt {}, Exception: ", this.attemptCount, exception); return Mono.just(ShouldRetryResult.noRetry()); } RetryWithException lastRetryWithException = (RetryWithException)exception; GoneAndRetryWithRetryPolicy.this.lastRetryWithException = lastRetryWithException; long remainingMilliseconds = (this.waitTimeInSeconds * 1_000L) - GoneAndRetryWithRetryPolicy.this.getElapsedTime().toMillis(); int currentRetryAttemptCount = this.attemptCount++; if (remainingMilliseconds <= 0) { logger.warn("Received RetryWithException after backoff/retry. Will fail the request.", lastRetryWithException); return Mono.just(ShouldRetryResult.error(lastRetryWithException)); } backoffTime = Duration.ofMillis( Math.min( Math.min(this.currentBackoffMilliseconds, remainingMilliseconds), RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS)); this.currentBackoffMilliseconds *= RetryWithRetryPolicy.BACK_OFF_MULTIPLIER; logger.debug("BackoffTime: {} ms.", backoffTime.toMillis()); long timeoutInMillSec = remainingMilliseconds - backoffTime.toMillis(); timeout = timeoutInMillSec > 0 ? Duration.ofMillis(timeoutInMillSec) : Duration.ofMillis(RetryWithRetryPolicy.MAXIMUM_BACKOFF_TIME_IN_MS); logger.info("Received RetryWithException, will retry, ", exception); return Mono.just(ShouldRetryResult.retryAfter(backoffTime, Quadruple.with(false, true, timeout, currentRetryAttemptCount))); } } }
Once this moves into `azure-core` we have a specialized `ByteArrayOutputStream` type that allows access to its internal buffer without copying when `toByteArray` is called.
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { ClientLogger logger = new ClientLogger(BinaryData.class); throw logger.logExceptionAsError(new UncheckedIOException(ex)); } }
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }
class BinaryData { private final byte[] data; BinaryData() { this.data = null; } /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. */ BinaryData(byte[] data) { this.data = data; } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { Objects.requireNonNull(data, "'data' cannot be null."); return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. 
* @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. 
The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private final byte[] data; /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. * @throws NullPointerException If {@code data} is null. */ BinaryData(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. 
*/ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. 
*/ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. 
* @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
Given this class only has static utility methods it should be fine to use a static logger and suppress the Checkstyle rule.
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { ClientLogger logger = new ClientLogger(BinaryData.class); throw logger.logExceptionAsError(new UncheckedIOException(ex)); } }
ClientLogger logger = new ClientLogger(BinaryData.class);
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }
class BinaryData { private final byte[] data; BinaryData() { this.data = null; } /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. */ BinaryData(byte[] data) { this.data = data; } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { Objects.requireNonNull(data, "'data' cannot be null."); return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. 
* @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. 
The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private final byte[] data; /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. * @throws NullPointerException If {@code data} is null. */ BinaryData(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. 
*/ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. 
*/ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. 
* @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
yes we can do this when we move it azure-core
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { ClientLogger logger = new ClientLogger(BinaryData.class); throw logger.logExceptionAsError(new UncheckedIOException(ex)); } }
ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }
class BinaryData { private final byte[] data; BinaryData() { this.data = null; } /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. */ BinaryData(byte[] data) { this.data = data; } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { Objects.requireNonNull(data, "'data' cannot be null."); return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. 
* @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. 
The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private final byte[] data; /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. * @throws NullPointerException If {@code data} is null. */ BinaryData(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. 
*/ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. 
*/ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. 
* @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
For methods that return Mono/Flux, use the error channel instead of throwing exception.
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { Objects.requireNonNull(data, "'data' cannot be null."); return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); }
Objects.requireNonNull(data, "'data' cannot be null.");
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private final byte[] data; BinaryData() { this.data = null; } /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. * @throws NullPointerException If {@code data} is null. */ BinaryData(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. 
* @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private final byte[] data; /** * Create instance of {@link BinaryData} given the data. * @param data to represent as bytes. * @throws NullPointerException If {@code data} is null. */ BinaryData(byte[] data) { Objects.requireNonNull(data, "'data' cannot be null."); this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Provides {@link Mono} of {@link InputStream} for the data represented by this {@link BinaryData} object. * * @return {@link InputStream} representation of the {@link BinaryData}. */ public Mono<InputStream> toStreamAsync() { return Mono.fromCallable(() -> toStream()); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ /** * Create {@link BinaryData} instance with given data and character set. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @param charSet to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromString(String data, Charset charSet) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(charSet)); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromString(String data) { Objects.requireNonNull(data, "'data' cannot be null."); return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } /** * Create {@link BinaryData} instance with given byte array data. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. * @return {@link BinaryData} representing binary data. */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code inputStream} or {@code serializer} is null. 
* @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { Objects.requireNonNull(data, "'data' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Provides {@link String} representation of this {@link BinaryData} object given a character set. * * @param charSet to use to convert bytes into {@link String}. * @return {@link String} representation of the the binary data. */ public String toString(Charset charSet) { return new String(this.data, charSet); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } }
possibly include the training document's modelID here since it is relevant for composed model
public static void main(final String[] args) { FormTrainingClient client = new FormTrainingClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String model1TrainingFiles = "{SAS_URL_of_your_container_in_blob_storage_for_model_1}"; SyncPoller<FormRecognizerOperationResult, CustomFormModel> model1Poller = client.beginTraining(model1TrainingFiles, true); String model2TrainingFiles = "{SAS_URL_of_your_container_in_blob_storage_for_model_2}"; SyncPoller<FormRecognizerOperationResult, CustomFormModel> model2Poller = client.beginTraining(model2TrainingFiles, true); String labeledModelId1 = model1Poller.getFinalResult().getModelId(); String labeledModelId2 = model2Poller.getFinalResult().getModelId(); final CustomFormModel customFormModel = client.beginCreateComposedModel(Arrays.asList(labeledModelId1, labeledModelId2), new CreateComposedModelOptions() .setModelDisplayName("my composed model name") .setPollInterval(Duration.ofSeconds(5)), Context.NONE) .getFinalResult(); System.out.printf("Model Id: %s%n", customFormModel.getModelId()); System.out.printf("Model Status: %s%n", customFormModel.getModelStatus()); System.out.printf("Model display name: %s%n", customFormModel.getModelDisplayName()); System.out.printf("Is this a composed model: %s%n", customFormModel.getCustomModelProperties().isComposed()); System.out.printf("Composed model creation started on: ", customFormModel.getTrainingStartedOn()); System.out.printf("Composed model creation completed on: ", customFormModel.getTrainingCompletedOn()); System.out.println("Recognized Fields:"); customFormModel.getSubmodels().forEach(customFormSubmodel -> { System.out.printf("The subModel with form type %s has accuracy: %.2f%n", customFormSubmodel.getFormType(), customFormSubmodel.getAccuracy()); customFormSubmodel.getFields().forEach((label, customFormModelField) -> System.out.printf("The model found field '%s' to have name: %s with an accuracy: %.2f%n", label, 
customFormModelField.getName(), customFormModelField.getAccuracy())); }); System.out.println(); customFormModel.getTrainingDocuments().forEach(trainingDocumentInfo -> { System.out.printf("Document name: %s%n", trainingDocumentInfo.getName()); System.out.printf("Document status: %s%n", trainingDocumentInfo.getStatus()); System.out.printf("Document page count: %d%n", trainingDocumentInfo.getPageCount()); if (!trainingDocumentInfo.getErrors().isEmpty()) { System.out.println("Document Errors:"); trainingDocumentInfo.getErrors().forEach(formRecognizerError -> System.out.printf("Error code %s, Error message: %s%n", formRecognizerError.getErrorCode(), formRecognizerError.getMessage())); } }); }
System.out.printf("Document name: %s%n", trainingDocumentInfo.getName());
public static void main(final String[] args) { FormTrainingClient client = new FormTrainingClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("https: .buildClient(); String model1TrainingFiles = "{SAS_URL_of_your_container_in_blob_storage_for_model_1}"; SyncPoller<FormRecognizerOperationResult, CustomFormModel> model1Poller = client.beginTraining(model1TrainingFiles, true); String model2TrainingFiles = "{SAS_URL_of_your_container_in_blob_storage_for_model_2}"; SyncPoller<FormRecognizerOperationResult, CustomFormModel> model2Poller = client.beginTraining(model2TrainingFiles, true); String labeledModelId1 = model1Poller.getFinalResult().getModelId(); String labeledModelId2 = model2Poller.getFinalResult().getModelId(); final CustomFormModel customFormModel = client.beginCreateComposedModel(Arrays.asList(labeledModelId1, labeledModelId2), new CreateComposedModelOptions() .setModelName("my composed model name") .setPollInterval(Duration.ofSeconds(5)), Context.NONE) .getFinalResult(); System.out.printf("Model Id: %s%n", customFormModel.getModelId()); System.out.printf("Model Status: %s%n", customFormModel.getModelStatus()); System.out.printf("Model name: %s%n", customFormModel.getModelName()); System.out.printf("Is this a composed model: %s%n", customFormModel.getCustomModelProperties().isComposed()); System.out.printf("Composed model creation started on: ", customFormModel.getTrainingStartedOn()); System.out.printf("Composed model creation completed on: ", customFormModel.getTrainingCompletedOn()); System.out.println("Recognized Fields:"); customFormModel.getSubmodels().forEach(customFormSubmodel -> { System.out.printf("Submodel Id: %s%n", customFormSubmodel.getModelId()); System.out.printf("The subModel with form type %s has accuracy: %.2f%n", customFormSubmodel.getFormType(), customFormSubmodel.getAccuracy()); customFormSubmodel.getFields().forEach((label, customFormModelField) -> System.out.printf("The model found field '%s' to have name: %s with 
an accuracy: %.2f%n", label, customFormModelField.getName(), customFormModelField.getAccuracy())); }); System.out.println(); customFormModel.getTrainingDocuments().forEach(trainingDocumentInfo -> { System.out.printf("Document name: %s%n", trainingDocumentInfo.getName()); System.out.printf("Document was provided to train model with Id : %s%n", trainingDocumentInfo.getModelId()); System.out.printf("Document name: %s%n", trainingDocumentInfo.getName()); System.out.printf("Document status: %s%n", trainingDocumentInfo.getStatus()); System.out.printf("Document page count: %d%n", trainingDocumentInfo.getPageCount()); if (!trainingDocumentInfo.getErrors().isEmpty()) { System.out.println("Document Errors:"); trainingDocumentInfo.getErrors().forEach(formRecognizerError -> System.out.printf("Error code %s, Error message: %s%n", formRecognizerError.getErrorCode(), formRecognizerError.getMessage())); } }); }
class CreateComposedModel { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ }
class CreateComposedModel { /** * Main method to invoke this demo. * * @param args Unused. Arguments to the program. */ }
Done , moved to volatile long startTimeInMs , but we could have done with StopWatch with synchronous block as we are doing in gone and retry policy. Also we are creating new subscriber every time and not sharing with other request , so not sure we would have the threading problem here. Any way I am fine either way
/**
 * Logs request diagnostics when the elapsed time measured by {@code durationTimer}
 * exceeds {@code thresholdForDiagnosticsInMs}.
 *
 * @param value the emitted response; diagnostics are logged only for
 *     CosmosItemResponse and FeedResponse values
 */
protected void hookOnNext(T value) {
    // Stop the timer so getTime() reports the elapsed duration for this emission.
    if (durationTimer.isStarted()) {
        durationTimer.stop();
    }
    // NOTE(review): durationTimer is a final field initialized inline, so this
    // null check is dead code. Also, StopWatch is not thread-safe and this hook
    // may run on a different thread than hookOnSubscribe — consider replacing
    // with a volatile start timestamp; TODO confirm threading model.
    if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
        if (value instanceof CosmosItemResponse) {
            CosmosItemResponse itemResponse = (CosmosItemResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                itemResponse.getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Restart the timer so the threshold applies per page of a paged response.
            durationTimer.reset();
            durationTimer.start();
            FeedResponse feedResponse = (FeedResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                feedResponse.getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
/**
 * Emits per-request diagnostics when the time elapsed since subscription is
 * greater than the configured diagnostics threshold.
 *
 * @param value the emitted response; only CosmosItemResponse and FeedResponse
 *     values produce diagnostics output
 */
protected void hookOnNext(T value) {
    final long thresholdMillis = diagnosticsThresholdDuration.toMillis();
    final long elapsedMillis = System.currentTimeMillis() - startTimeInMs;
    if (elapsedMillis > thresholdMillis) {
        if (value instanceof CosmosItemResponse) {
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((CosmosItemResponse) value).getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Reset the checkpoint so each slow page is reported individually.
            startTimeInMs = System.currentTimeMillis();
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((FeedResponse) value).getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final StopWatch durationTimer = new StopWatch(); private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; private int thresholdForDiagnosticsInMs; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, int thresholdForDiagnosticsInMs) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.thresholdForDiagnosticsInMs = thresholdForDiagnosticsInMs; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); durationTimer.start(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final Duration diagnosticsThresholdDuration; private volatile long startTimeInMs; private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, Duration diagnosticsThresholdDuration) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.diagnosticsThresholdDuration = diagnosticsThresholdDuration; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); startTimeInMs = System.currentTimeMillis(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}", throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
StopWatch can be used from different threads, and it is not thread-safe. Please use a volatile long for measuring the checkpoint instead. ```suggestion if (System.currentTimeMillis() - startTimeInMs > thresholdForDiagnosticsInMs) { ```
/**
 * Logs request diagnostics when the elapsed time measured by {@code durationTimer}
 * exceeds {@code thresholdForDiagnosticsInMs}.
 *
 * @param value the emitted response; diagnostics are logged only for
 *     CosmosItemResponse and FeedResponse values
 */
protected void hookOnNext(T value) {
    // Stop the timer so getTime() reports the elapsed duration for this emission.
    if (durationTimer.isStarted()) {
        durationTimer.stop();
    }
    // NOTE(review): durationTimer is a final field initialized inline, so this
    // null check is dead code. Also, StopWatch is not thread-safe and this hook
    // may run on a different thread than hookOnSubscribe — consider replacing
    // with a volatile start timestamp; TODO confirm threading model.
    if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
        if (value instanceof CosmosItemResponse) {
            CosmosItemResponse itemResponse = (CosmosItemResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                itemResponse.getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Restart the timer so the threshold applies per page of a paged response.
            durationTimer.reset();
            durationTimer.start();
            FeedResponse feedResponse = (FeedResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                feedResponse.getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
/**
 * Emits per-request diagnostics when the time elapsed since subscription is
 * greater than the configured diagnostics threshold.
 *
 * @param value the emitted response; only CosmosItemResponse and FeedResponse
 *     values produce diagnostics output
 */
protected void hookOnNext(T value) {
    final long thresholdMillis = diagnosticsThresholdDuration.toMillis();
    final long elapsedMillis = System.currentTimeMillis() - startTimeInMs;
    if (elapsedMillis > thresholdMillis) {
        if (value instanceof CosmosItemResponse) {
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((CosmosItemResponse) value).getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Reset the checkpoint so each slow page is reported individually.
            startTimeInMs = System.currentTimeMillis();
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((FeedResponse) value).getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final StopWatch durationTimer = new StopWatch(); private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; private int thresholdForDiagnosticsInMs; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, int thresholdForDiagnosticsInMs) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.thresholdForDiagnosticsInMs = thresholdForDiagnosticsInMs; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); durationTimer.start(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final Duration diagnosticsThresholdDuration; private volatile long startTimeInMs; private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, Duration diagnosticsThresholdDuration) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.diagnosticsThresholdDuration = diagnosticsThresholdDuration; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); startTimeInMs = System.currentTimeMillis(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}", throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
Wow - thanks - I would never have imagined StopWatch to not be thread-safe in Java
/**
 * Logs request diagnostics when the elapsed time measured by {@code durationTimer}
 * exceeds {@code thresholdForDiagnosticsInMs}.
 *
 * @param value the emitted response; diagnostics are logged only for
 *     CosmosItemResponse and FeedResponse values
 */
protected void hookOnNext(T value) {
    // Stop the timer so getTime() reports the elapsed duration for this emission.
    if (durationTimer.isStarted()) {
        durationTimer.stop();
    }
    // NOTE(review): durationTimer is a final field initialized inline, so this
    // null check is dead code. Also, StopWatch is not thread-safe and this hook
    // may run on a different thread than hookOnSubscribe — consider replacing
    // with a volatile start timestamp; TODO confirm threading model.
    if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
        if (value instanceof CosmosItemResponse) {
            CosmosItemResponse itemResponse = (CosmosItemResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                itemResponse.getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Restart the timer so the threshold applies per page of a paged response.
            durationTimer.reset();
            durationTimer.start();
            FeedResponse feedResponse = (FeedResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                feedResponse.getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
/**
 * Emits per-request diagnostics when the time elapsed since subscription is
 * greater than the configured diagnostics threshold.
 *
 * @param value the emitted response; only CosmosItemResponse and FeedResponse
 *     values produce diagnostics output
 */
protected void hookOnNext(T value) {
    final long thresholdMillis = diagnosticsThresholdDuration.toMillis();
    final long elapsedMillis = System.currentTimeMillis() - startTimeInMs;
    if (elapsedMillis > thresholdMillis) {
        if (value instanceof CosmosItemResponse) {
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((CosmosItemResponse) value).getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Reset the checkpoint so each slow page is reported individually.
            startTimeInMs = System.currentTimeMillis();
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((FeedResponse) value).getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final StopWatch durationTimer = new StopWatch(); private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; private int thresholdForDiagnosticsInMs; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, int thresholdForDiagnosticsInMs) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.thresholdForDiagnosticsInMs = thresholdForDiagnosticsInMs; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); durationTimer.start(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final Duration diagnosticsThresholdDuration; private volatile long startTimeInMs; private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, Duration diagnosticsThresholdDuration) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.diagnosticsThresholdDuration = diagnosticsThresholdDuration; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); startTimeInMs = System.currentTimeMillis(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}", throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
Per the documentation, it is not thread-safe :-) http://commons.apache.org/proper/commons-lang/apidocs/org/apache/commons/lang3/time/StopWatch.html https://guava.dev/releases/18.0/api/docs/com/google/common/base/Stopwatch.html
/**
 * Logs request diagnostics when the elapsed time measured by {@code durationTimer}
 * exceeds {@code thresholdForDiagnosticsInMs}.
 *
 * @param value the emitted response; diagnostics are logged only for
 *     CosmosItemResponse and FeedResponse values
 */
protected void hookOnNext(T value) {
    // Stop the timer so getTime() reports the elapsed duration for this emission.
    if (durationTimer.isStarted()) {
        durationTimer.stop();
    }
    // NOTE(review): durationTimer is a final field initialized inline, so this
    // null check is dead code. Also, StopWatch is not thread-safe and this hook
    // may run on a different thread than hookOnSubscribe — consider replacing
    // with a volatile start timestamp; TODO confirm threading model.
    if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
        if (value instanceof CosmosItemResponse) {
            CosmosItemResponse itemResponse = (CosmosItemResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                itemResponse.getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Restart the timer so the threshold applies per page of a paged response.
            durationTimer.reset();
            durationTimer.start();
            FeedResponse feedResponse = (FeedResponse) value;
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdForDiagnosticsInMs,
                feedResponse.getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
if (durationTimer != null && durationTimer.getTime() > thresholdForDiagnosticsInMs) {
/**
 * Emits per-request diagnostics when the time elapsed since subscription is
 * greater than the configured diagnostics threshold.
 *
 * @param value the emitted response; only CosmosItemResponse and FeedResponse
 *     values produce diagnostics output
 */
protected void hookOnNext(T value) {
    final long thresholdMillis = diagnosticsThresholdDuration.toMillis();
    final long elapsedMillis = System.currentTimeMillis() - startTimeInMs;
    if (elapsedMillis > thresholdMillis) {
        if (value instanceof CosmosItemResponse) {
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((CosmosItemResponse) value).getDiagnostics().toString());
        } else if (value instanceof FeedResponse) {
            // Reset the checkpoint so each slow page is reported individually.
            startTimeInMs = System.currentTimeMillis();
            logger.info("Request taking longer than {}ms diagnostic = {}", thresholdMillis,
                ((FeedResponse) value).getCosmosDiagnostics().toString());
        }
    }
    logger.debug("hookOnNext: {}, count:{}", value, count.get());
}
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final StopWatch durationTimer = new StopWatch(); private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; private int thresholdForDiagnosticsInMs; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, int thresholdForDiagnosticsInMs) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.thresholdForDiagnosticsInMs = thresholdForDiagnosticsInMs; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); durationTimer.start(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
class BenchmarkRequestSubscriber<T> extends BaseSubscriber<T> { final static Logger logger = LoggerFactory.getLogger(BenchmarkRequestSubscriber.class); private final Duration diagnosticsThresholdDuration; private volatile long startTimeInMs; private Meter successMeter; private Meter failureMeter; private Semaphore concurrencyControlSemaphore; private AtomicLong count; public Timer.Context context; public BenchmarkRequestSubscriber(Meter successMeter, Meter failureMeter, Semaphore concurrencyControlSemaphore, AtomicLong count, Duration diagnosticsThresholdDuration) { this.successMeter = successMeter; this.failureMeter = failureMeter; this.concurrencyControlSemaphore = concurrencyControlSemaphore; this.count = count; this.diagnosticsThresholdDuration = diagnosticsThresholdDuration; } @Override protected void hookOnSubscribe(Subscription subscription) { super.hookOnSubscribe(subscription); startTimeInMs = System.currentTimeMillis(); } @Override @Override protected void hookOnComplete() { context.stop(); successMeter.mark(); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } @Override protected void hookOnError(Throwable throwable) { context.stop(); failureMeter.mark(); logger.error("Encountered failure {} on thread {}", throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); synchronized (count) { count.incrementAndGet(); count.notify(); } } }
Any reason for setting this to 4 instead of 1? Probably adding a comment to explain why 4 was chosen would help.
/**
 * Retrieves all pages by fetching the first page and then expanding each page
 * into the next one using the continuation token tracked in {@code state}.
 *
 * @param state continuation state shared across page retrievals
 * @param pageRetriever function that fetches pages for a continuation token
 * @param pageSize the preferred page size, may be {@code null}
 * @return a Flux emitting every retrieved page
 */
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) {
    // The second argument to 'expand' is a capacity hint that sizes the expand
    // subscriber's internal buffer. PageRetriever's 'get' returns a Flux, so a
    // single call may emit several pages; 4 accommodates that while keeping the
    // common single-page case free of buffer resizing.
    return retrievePage(state, pageRetriever, pageSize)
        .expand(page -> {
            state.setLastContinuationToken(page.getContinuationToken());
            return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize));
        }, 4);
}
}, 4);
/**
 * Streams every page: the first page is fetched directly, and each subsequent
 * page is produced by expanding the previous page's continuation token through
 * the shared {@code state}.
 *
 * @param state continuation state shared across page retrievals
 * @param pageRetriever function that fetches pages for a continuation token
 * @param pageSize the preferred page size, may be {@code null}
 * @return a Flux emitting every retrieved page
 */
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) {
    /*
     * The capacity hint (second 'expand' argument) sizes the expand subscriber's
     * initial buffer. PageRetriever's 'get' returns a Flux, so one call may emit
     * multiple pages; 4 covers that while avoiding buffer resizing or additional
     * service requests in the common single-page case.
     */
    final Flux<P> firstPage = retrievePage(state, pageRetriever, pageSize);
    return firstPage.expand(currentPage -> {
        state.setLastContinuationToken(currentPage.getContinuationToken());
        return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize));
    }, 4);
}
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
I should add a comment here. The second argument is a hint to the operator that determines how large an initial buffer it should instantiate. Given that `PageRetriever` returns a `Flux`, the implementation could return more than one page. Also, from looking at Reactor's code, the smallest this buffer can effectively be is `8`, but I am sticking with `4` for now in case that changes in the future.
/**
 * Retrieves all pages by fetching the first page and then expanding each page
 * into the next one using the continuation token tracked in {@code state}.
 *
 * @param state continuation state shared across page retrievals
 * @param pageRetriever function that fetches pages for a continuation token
 * @param pageSize the preferred page size, may be {@code null}
 * @return a Flux emitting every retrieved page
 */
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) {
    // The second argument to 'expand' is a capacity hint that sizes the expand
    // subscriber's internal buffer. PageRetriever's 'get' returns a Flux, so a
    // single call may emit several pages; 4 accommodates that while keeping the
    // common single-page case free of buffer resizing.
    return retrievePage(state, pageRetriever, pageSize)
        .expand(page -> {
            state.setLastContinuationToken(page.getContinuationToken());
            return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize));
        }, 4);
}
}, 4);
/**
 * Streams every page: the first page is fetched directly, and each subsequent
 * page is produced by expanding the previous page's continuation token through
 * the shared {@code state}.
 *
 * @param state continuation state shared across page retrievals
 * @param pageRetriever function that fetches pages for a continuation token
 * @param pageSize the preferred page size, may be {@code null}
 * @return a Flux emitting every retrieved page
 */
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) {
    /*
     * The capacity hint (second 'expand' argument) sizes the expand subscriber's
     * initial buffer. PageRetriever's 'get' returns a Flux, so one call may emit
     * multiple pages; 4 covers that while avoiding buffer resizing or additional
     * service requests in the common single-page case.
     */
    final Flux<P> firstPage = retrievePage(state, pageRetriever, pageSize);
    return firstPage.expand(currentPage -> {
        state.setLastContinuationToken(currentPage.getContinuationToken());
        return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize));
    }, 4);
}
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
I think it makes no difference in our case; i.e. storing the continuation-token before we gave the page to the user (i.e. in doOnNext immediately after call to "retrievePage") vs storing it after user consumes the page (inside expand).
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
return retrievePage(state, pageRetriever, pageSize)
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { /* * The second argument for 'expand' is an initial capacity hint to the expand subscriber to indicate what size * buffer it should instantiate. 4 is used as PageRetriever's 'get' returns a Flux so an implementation may * return multiple pages, but in the case only one page is retrieved the buffer won't need to be resized or * request additional pages from the service. */ return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
From what I remember being told it is better to have explicitly needed operations performed in `map` or `flatMap` as `doOnX` is meant for side affect operations such as logging or metrics. I don't see continuation token updating as a side affect but an explicit need.
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
return retrievePage(state, pageRetriever, pageSize)
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { /* * The second argument for 'expand' is an initial capacity hint to the expand subscriber to indicate what size * buffer it should instantiate. 4 is used as PageRetriever's 'get' returns a Flux so an implementation may * return multiple pages, but in the case only one page is retrieved the buffer won't need to be resized or * request additional pages from the service. */ return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
Yep, I think all we need to ensure is the link is tthere before retrieving the next page, which we do. lgtm.
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
return retrievePage(state, pageRetriever, pageSize)
private Flux<P> retrievePages(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { /* * The second argument for 'expand' is an initial capacity hint to the expand subscriber to indicate what size * buffer it should instantiate. 4 is used as PageRetriever's 'get' returns a Flux so an implementation may * return multiple pages, but in the case only one page is retrieved the buffer won't need to be resized or * request additional pages from the service. */ return retrievePage(state, pageRetriever, pageSize) .expand(page -> { state.setLastContinuationToken(page.getContinuationToken()); return Flux.defer(() -> retrievePage(state, pageRetriever, pageSize)); }, 4); }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
class ContinuablePagedFluxCore<C, T, P extends ContinuablePage<C, T>> extends ContinuablePagedFlux<C, T, P> { final Supplier<PageRetriever<C, P>> pageRetrieverProvider; final Integer defaultPageSize; /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); this.defaultPageSize = null; } /** * Creates an instance of {@link ContinuablePagedFluxCore}. * * @param pageRetrieverProvider a provider that returns {@link PageRetriever}. * @param pageSize the preferred page size * @throws IllegalArgumentException if defaultPageSize is not greater than zero */ protected ContinuablePagedFluxCore(Supplier<PageRetriever<C, P>> pageRetrieverProvider, int pageSize) { this.pageRetrieverProvider = Objects.requireNonNull(pageRetrieverProvider, "'pageRetrieverProvider' function cannot be null."); if (pageSize <= 0) { throw new IllegalArgumentException("pageSize > 0 required but provided: " + pageSize); } this.defaultPageSize = pageSize; } /** * Get the page size configured this {@link ContinuablePagedFluxCore}. * * @return the page size configured, {@code null} if unspecified. 
*/ public Integer getPageSize() { return this.defaultPageSize; } @Override public Flux<P> byPage() { return byPage(this.pageRetrieverProvider, null, this.defaultPageSize); } @Override public Flux<P> byPage(C continuationToken) { if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, this.defaultPageSize); } @Override public Flux<P> byPage(int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } return byPage(this.pageRetrieverProvider, null, preferredPageSize); } @Override public Flux<P> byPage(C continuationToken, int preferredPageSize) { if (preferredPageSize <= 0) { return Flux.error(new IllegalArgumentException("preferredPageSize > 0 required but provided: " + preferredPageSize)); } if (continuationToken == null) { return Flux.empty(); } return byPage(this.pageRetrieverProvider, continuationToken, preferredPageSize); } /** * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items. * * @param coreSubscriber The subscriber for this {@link ContinuablePagedFluxCore} */ @Override public void subscribe(CoreSubscriber<? super T> coreSubscriber) { byPage(this.pageRetrieverProvider, null, this.defaultPageSize) .flatMap(page -> { IterableStream<T> iterableStream = page.getElements(); return iterableStream == null ? Flux.empty() : Flux.fromIterable(page.getElements()); }) .subscribe(coreSubscriber); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing Flux instances returned Page Retriever Function * calls. 
* * @param provider the provider that when called returns Page Retriever Function * @param continuationToken the token to identify the pages to be retrieved * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} identified by the given continuation token */ private Flux<P> byPage(Supplier<PageRetriever<C, P>> provider, C continuationToken, Integer pageSize) { return Flux.defer(() -> { final PageRetriever<C, P> pageRetriever = provider.get(); final ContinuationState<C> state = new ContinuationState<>(continuationToken); return retrievePages(state, pageRetriever, pageSize); }); } /** * Get a Flux of {@link ContinuablePage} created by concat-ing child Flux instances returned Page Retriever Function * calls. The first child Flux of {@link ContinuablePage} is identified by the continuation-token in the state. * * @param state the state to be used across multiple Page Retriever Function calls * @param pageRetriever the Page Retriever Function * @param pageSize the preferred page size * @return a Flux of {@link ContinuablePage} */ private Flux<P> retrievePage(ContinuationState<C> state, PageRetriever<C, P> pageRetriever, Integer pageSize) { if (state.isDone()) { return Flux.empty(); } else { return pageRetriever.get(state.getLastContinuationToken(), pageSize) .switchIfEmpty(Flux.defer(() -> { state.setLastContinuationToken(null); return Mono.empty(); })); } } }
We should mention this default value in the `getter` and `setter`
public DirectConnectionConfig() { this.connectionEndpointRediscoveryEnabled = true; this.connectTimeout = DEFAULT_CONNECT_TIMEOUT; this.idleConnectionTimeout = Duration.ZERO; this.idleEndpointTimeout = DEFAULT_IDLE_ENDPOINT_TIMEOUT; this.maxConnectionsPerEndpoint = DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT; this.maxRequestsPerConnection = DEFAULT_MAX_REQUESTS_PER_CONNECTION; this.requestTimeout = DEFAULT_REQUEST_TIMEOUT; }
this.connectionEndpointRediscoveryEnabled = true;
public DirectConnectionConfig() { this.connectionEndpointRediscoveryEnabled = DEFAULT_CONNECTION_ENDPOINT_REDISCOVERY_ENABLED; this.connectTimeout = DEFAULT_CONNECT_TIMEOUT; this.idleConnectionTimeout = Duration.ZERO; this.idleEndpointTimeout = DEFAULT_IDLE_ENDPOINT_TIMEOUT; this.maxConnectionsPerEndpoint = DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT; this.maxRequestsPerConnection = DEFAULT_MAX_REQUESTS_PER_CONNECTION; this.requestTimeout = DEFAULT_REQUEST_TIMEOUT; }
class DirectConnectionConfig { private static final Duration DEFAULT_IDLE_ENDPOINT_TIMEOUT = Duration.ofHours(1l); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(5L); private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5L); private static final int DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT = 130; private static final int DEFAULT_MAX_REQUESTS_PER_CONNECTION = 30; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleConnectionTimeout; private Duration idleEndpointTimeout; private Duration requestTimeout; private int maxConnectionsPerEndpoint; private int maxRequestsPerConnection; /** * Constructor */ /** * Gets a value indicating whether Direct TCP connection endpoint rediscovery is enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that are likely * to occur: * <ul> * <li>During rolling upgrades of a Cosmos instance or * <li>When a backend node is being decommissioned or restarted (e.g., to restart or remove an unhealthy replica.) * </ul> * * @return {@code true} if Direct TCP connection endpoint rediscovery is enabled; {@code false} otherwise. */ public boolean isConnectionEndpointRediscoveryEnabled() { return this.connectionEndpointRediscoveryEnabled; } /** * Sets a value indicating whether Direct TCP connection endpoint rediscovery should be enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that are likely * to occur: * <ul> * <li>During rolling upgrades of a Cosmos instance or * <li>When a backend node is being decommissioned or restarted (e.g., to restart or remove an unhealthy replica.) * </ul> * * @param connectionEndpointRediscoveryEnabled {@code true} if connection endpoint rediscovery is enabled; {@code * false} otherwise. * * @return the {@linkplain DirectConnectionConfig}. 
*/ public DirectConnectionConfig setConnectionEndpointRediscoveryEnabled(boolean connectionEndpointRediscoveryEnabled) { this.connectionEndpointRediscoveryEnabled = connectionEndpointRediscoveryEnabled; return this; } /** * Gets the default DIRECT connection configuration. * * @return the default direct connection configuration. */ public static DirectConnectionConfig getDefaultConfig() { return new DirectConnectionConfig(); } /** * Gets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @return direct connect timeout */ public Duration getConnectTimeout() { return connectTimeout; } /** * Sets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @param connectTimeout the connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setConnectTimeout(Duration connectTimeout) { this.connectTimeout = connectTimeout; return this; } /** * Gets the idle connection timeout for direct client * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. * * @return idle connection timeout */ public Duration getIdleConnectionTimeout() { return idleConnectionTimeout; } /** * Sets the idle connection timeout * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. 
* * @param idleConnectionTimeout idle connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) { this.idleConnectionTimeout = idleConnectionTimeout; return this; } /** * Gets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @return the idle endpoint timeout */ public Duration getIdleEndpointTimeout() { return idleEndpointTimeout; } /** * Sets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @param idleEndpointTimeout the idle endpoint timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleEndpointTimeout(Duration idleEndpointTimeout) { this.idleEndpointTimeout = idleEndpointTimeout; return this; } /** * Gets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @return the max connections per endpoint */ public int getMaxConnectionsPerEndpoint() { return maxConnectionsPerEndpoint; } /** * Sets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @param maxConnectionsPerEndpoint the max connections per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxConnectionsPerEndpoint(int maxConnectionsPerEndpoint) { this.maxConnectionsPerEndpoint = maxConnectionsPerEndpoint; return this; } /** * Gets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @return the max 
requests per endpoint */ public int getMaxRequestsPerConnection() { return maxRequestsPerConnection; } /** * Sets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @param maxRequestsPerConnection the max requests per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxRequestsPerConnection(int maxRequestsPerConnection) { this.maxRequestsPerConnection = maxRequestsPerConnection; return this; } /** * Gets the request timeout interval * This represents the timeout interval for requests * * Default value is 60 seconds * * @return the request timeout interval */ Duration getRequestTimeout() { return requestTimeout; } /** * Sets the request timeout interval * This represents the timeout interval for requests * * Default value is 5 seconds * * @param requestTimeout the request timeout interval * @return the {@link DirectConnectionConfig} */ DirectConnectionConfig setRequestTimeout(Duration requestTimeout) { this.requestTimeout = requestTimeout; return this; } @Override public String toString() { return "DirectConnectionConfig{" + "connectTimeout=" + connectTimeout + ", idleConnectionTimeout=" + idleConnectionTimeout + ", idleEndpointTimeout=" + idleEndpointTimeout + ", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint + ", maxRequestsPerConnection=" + maxRequestsPerConnection + '}'; } }
class DirectConnectionConfig { private static final Boolean DEFAULT_CONNECTION_ENDPOINT_REDISCOVERY_ENABLED = false; private static final Duration DEFAULT_IDLE_ENDPOINT_TIMEOUT = Duration.ofHours(1l); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(5L); private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5L); private static final int DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT = 130; private static final int DEFAULT_MAX_REQUESTS_PER_CONNECTION = 30; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleConnectionTimeout; private Duration idleEndpointTimeout; private Duration requestTimeout; private int maxConnectionsPerEndpoint; private int maxRequestsPerConnection; /** * Constructor */ /** * Gets a value indicating whether Direct TCP connection endpoint rediscovery is enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that may occur during maintenance operations. * * By default, connection endpoint rediscovery is disabled. * * @return {@code true} if Direct TCP connection endpoint rediscovery is enabled; {@code false} otherwise. */ @Beta(Beta.SinceVersion.V4_8_0) public boolean isConnectionEndpointRediscoveryEnabled() { return this.connectionEndpointRediscoveryEnabled; } /** * Sets a value indicating whether Direct TCP connection endpoint rediscovery should be enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that may occur during maintenance operations. * * By default, connection endpoint rediscovery is disabled. * * @param connectionEndpointRediscoveryEnabled {@code true} if connection endpoint rediscovery is enabled; {@code * false} otherwise. * * @return the {@linkplain DirectConnectionConfig}. 
*/ @Beta(Beta.SinceVersion.V4_8_0) public DirectConnectionConfig setConnectionEndpointRediscoveryEnabled(boolean connectionEndpointRediscoveryEnabled) { this.connectionEndpointRediscoveryEnabled = connectionEndpointRediscoveryEnabled; return this; } /** * Gets the default DIRECT connection configuration. * * @return the default direct connection configuration. */ public static DirectConnectionConfig getDefaultConfig() { return new DirectConnectionConfig(); } /** * Gets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @return direct connect timeout */ public Duration getConnectTimeout() { return connectTimeout; } /** * Sets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @param connectTimeout the connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setConnectTimeout(Duration connectTimeout) { this.connectTimeout = connectTimeout; return this; } /** * Gets the idle connection timeout for direct client * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. * * @return idle connection timeout */ public Duration getIdleConnectionTimeout() { return idleConnectionTimeout; } /** * Sets the idle connection timeout * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. 
* * @param idleConnectionTimeout idle connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) { this.idleConnectionTimeout = idleConnectionTimeout; return this; } /** * Gets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @return the idle endpoint timeout */ public Duration getIdleEndpointTimeout() { return idleEndpointTimeout; } /** * Sets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @param idleEndpointTimeout the idle endpoint timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleEndpointTimeout(Duration idleEndpointTimeout) { this.idleEndpointTimeout = idleEndpointTimeout; return this; } /** * Gets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @return the max connections per endpoint */ public int getMaxConnectionsPerEndpoint() { return maxConnectionsPerEndpoint; } /** * Sets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @param maxConnectionsPerEndpoint the max connections per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxConnectionsPerEndpoint(int maxConnectionsPerEndpoint) { this.maxConnectionsPerEndpoint = maxConnectionsPerEndpoint; return this; } /** * Gets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @return the max 
requests per endpoint */ public int getMaxRequestsPerConnection() { return maxRequestsPerConnection; } /** * Sets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @param maxRequestsPerConnection the max requests per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxRequestsPerConnection(int maxRequestsPerConnection) { this.maxRequestsPerConnection = maxRequestsPerConnection; return this; } /** * Gets the request timeout interval * This represents the timeout interval for requests * * Default value is 60 seconds * * @return the request timeout interval */ Duration getRequestTimeout() { return requestTimeout; } /** * Sets the request timeout interval * This represents the timeout interval for requests * * Default value is 5 seconds * * @param requestTimeout the request timeout interval * @return the {@link DirectConnectionConfig} */ DirectConnectionConfig setRequestTimeout(Duration requestTimeout) { this.requestTimeout = requestTimeout; return this; } @Override public String toString() { return "DirectConnectionConfig{" + "connectTimeout=" + connectTimeout + ", idleConnectionTimeout=" + idleConnectionTimeout + ", idleEndpointTimeout=" + idleEndpointTimeout + ", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint + ", maxRequestsPerConnection=" + maxRequestsPerConnection + '}'; } }
added DEFAULT_CONNECTION_ENDPOINT_REDISCOVERY_ENABLED, and mentioned in the getter and setter method.
public DirectConnectionConfig() { this.connectionEndpointRediscoveryEnabled = true; this.connectTimeout = DEFAULT_CONNECT_TIMEOUT; this.idleConnectionTimeout = Duration.ZERO; this.idleEndpointTimeout = DEFAULT_IDLE_ENDPOINT_TIMEOUT; this.maxConnectionsPerEndpoint = DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT; this.maxRequestsPerConnection = DEFAULT_MAX_REQUESTS_PER_CONNECTION; this.requestTimeout = DEFAULT_REQUEST_TIMEOUT; }
this.connectionEndpointRediscoveryEnabled = true;
public DirectConnectionConfig() { this.connectionEndpointRediscoveryEnabled = DEFAULT_CONNECTION_ENDPOINT_REDISCOVERY_ENABLED; this.connectTimeout = DEFAULT_CONNECT_TIMEOUT; this.idleConnectionTimeout = Duration.ZERO; this.idleEndpointTimeout = DEFAULT_IDLE_ENDPOINT_TIMEOUT; this.maxConnectionsPerEndpoint = DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT; this.maxRequestsPerConnection = DEFAULT_MAX_REQUESTS_PER_CONNECTION; this.requestTimeout = DEFAULT_REQUEST_TIMEOUT; }
class DirectConnectionConfig { private static final Duration DEFAULT_IDLE_ENDPOINT_TIMEOUT = Duration.ofHours(1l); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(5L); private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5L); private static final int DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT = 130; private static final int DEFAULT_MAX_REQUESTS_PER_CONNECTION = 30; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleConnectionTimeout; private Duration idleEndpointTimeout; private Duration requestTimeout; private int maxConnectionsPerEndpoint; private int maxRequestsPerConnection; /** * Constructor */ /** * Gets a value indicating whether Direct TCP connection endpoint rediscovery is enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that are likely * to occur: * <ul> * <li>During rolling upgrades of a Cosmos instance or * <li>When a backend node is being decommissioned or restarted (e.g., to restart or remove an unhealthy replica.) * </ul> * * @return {@code true} if Direct TCP connection endpoint rediscovery is enabled; {@code false} otherwise. */ public boolean isConnectionEndpointRediscoveryEnabled() { return this.connectionEndpointRediscoveryEnabled; } /** * Sets a value indicating whether Direct TCP connection endpoint rediscovery should be enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that are likely * to occur: * <ul> * <li>During rolling upgrades of a Cosmos instance or * <li>When a backend node is being decommissioned or restarted (e.g., to restart or remove an unhealthy replica.) * </ul> * * @param connectionEndpointRediscoveryEnabled {@code true} if connection endpoint rediscovery is enabled; {@code * false} otherwise. * * @return the {@linkplain DirectConnectionConfig}. 
*/ public DirectConnectionConfig setConnectionEndpointRediscoveryEnabled(boolean connectionEndpointRediscoveryEnabled) { this.connectionEndpointRediscoveryEnabled = connectionEndpointRediscoveryEnabled; return this; } /** * Gets the default DIRECT connection configuration. * * @return the default direct connection configuration. */ public static DirectConnectionConfig getDefaultConfig() { return new DirectConnectionConfig(); } /** * Gets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @return direct connect timeout */ public Duration getConnectTimeout() { return connectTimeout; } /** * Sets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @param connectTimeout the connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setConnectTimeout(Duration connectTimeout) { this.connectTimeout = connectTimeout; return this; } /** * Gets the idle connection timeout for direct client * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. * * @return idle connection timeout */ public Duration getIdleConnectionTimeout() { return idleConnectionTimeout; } /** * Sets the idle connection timeout * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. 
* * @param idleConnectionTimeout idle connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) { this.idleConnectionTimeout = idleConnectionTimeout; return this; } /** * Gets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @return the idle endpoint timeout */ public Duration getIdleEndpointTimeout() { return idleEndpointTimeout; } /** * Sets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @param idleEndpointTimeout the idle endpoint timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleEndpointTimeout(Duration idleEndpointTimeout) { this.idleEndpointTimeout = idleEndpointTimeout; return this; } /** * Gets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @return the max connections per endpoint */ public int getMaxConnectionsPerEndpoint() { return maxConnectionsPerEndpoint; } /** * Sets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @param maxConnectionsPerEndpoint the max connections per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxConnectionsPerEndpoint(int maxConnectionsPerEndpoint) { this.maxConnectionsPerEndpoint = maxConnectionsPerEndpoint; return this; } /** * Gets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @return the max 
requests per endpoint */ public int getMaxRequestsPerConnection() { return maxRequestsPerConnection; } /** * Sets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @param maxRequestsPerConnection the max requests per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxRequestsPerConnection(int maxRequestsPerConnection) { this.maxRequestsPerConnection = maxRequestsPerConnection; return this; } /** * Gets the request timeout interval * This represents the timeout interval for requests * * Default value is 60 seconds * * @return the request timeout interval */ Duration getRequestTimeout() { return requestTimeout; } /** * Sets the request timeout interval * This represents the timeout interval for requests * * Default value is 5 seconds * * @param requestTimeout the request timeout interval * @return the {@link DirectConnectionConfig} */ DirectConnectionConfig setRequestTimeout(Duration requestTimeout) { this.requestTimeout = requestTimeout; return this; } @Override public String toString() { return "DirectConnectionConfig{" + "connectTimeout=" + connectTimeout + ", idleConnectionTimeout=" + idleConnectionTimeout + ", idleEndpointTimeout=" + idleEndpointTimeout + ", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint + ", maxRequestsPerConnection=" + maxRequestsPerConnection + '}'; } }
class DirectConnectionConfig { private static final Boolean DEFAULT_CONNECTION_ENDPOINT_REDISCOVERY_ENABLED = false; private static final Duration DEFAULT_IDLE_ENDPOINT_TIMEOUT = Duration.ofHours(1l); private static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(5L); private static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5L); private static final int DEFAULT_MAX_CONNECTIONS_PER_ENDPOINT = 130; private static final int DEFAULT_MAX_REQUESTS_PER_CONNECTION = 30; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleConnectionTimeout; private Duration idleEndpointTimeout; private Duration requestTimeout; private int maxConnectionsPerEndpoint; private int maxRequestsPerConnection; /** * Constructor */ /** * Gets a value indicating whether Direct TCP connection endpoint rediscovery is enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that may occur during maintenance operations. * * By default, connection endpoint rediscovery is disabled. * * @return {@code true} if Direct TCP connection endpoint rediscovery is enabled; {@code false} otherwise. */ @Beta(Beta.SinceVersion.V4_8_0) public boolean isConnectionEndpointRediscoveryEnabled() { return this.connectionEndpointRediscoveryEnabled; } /** * Sets a value indicating whether Direct TCP connection endpoint rediscovery should be enabled. * <p> * The connection endpoint rediscovery feature is designed to reduce and spread-out latency spikes that may occur during maintenance operations. * * By default, connection endpoint rediscovery is disabled. * * @param connectionEndpointRediscoveryEnabled {@code true} if connection endpoint rediscovery is enabled; {@code * false} otherwise. * * @return the {@linkplain DirectConnectionConfig}. 
*/ @Beta(Beta.SinceVersion.V4_8_0) public DirectConnectionConfig setConnectionEndpointRediscoveryEnabled(boolean connectionEndpointRediscoveryEnabled) { this.connectionEndpointRediscoveryEnabled = connectionEndpointRediscoveryEnabled; return this; } /** * Gets the default DIRECT connection configuration. * * @return the default direct connection configuration. */ public static DirectConnectionConfig getDefaultConfig() { return new DirectConnectionConfig(); } /** * Gets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @return direct connect timeout */ public Duration getConnectTimeout() { return connectTimeout; } /** * Sets the connect timeout for direct client, * represents timeout for establishing connections with an endpoint. * * Configures timeout for underlying Netty Channel {@link ChannelOption * * By default, the connect timeout is 5 seconds. * * @param connectTimeout the connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setConnectTimeout(Duration connectTimeout) { this.connectTimeout = connectTimeout; return this; } /** * Gets the idle connection timeout for direct client * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. * * @return idle connection timeout */ public Duration getIdleConnectionTimeout() { return idleConnectionTimeout; } /** * Sets the idle connection timeout * * Default value is {@link Duration * * Direct client doesn't close a single connection to an endpoint * by default unless specified. 
* * @param idleConnectionTimeout idle connection timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleConnectionTimeout(Duration idleConnectionTimeout) { this.idleConnectionTimeout = idleConnectionTimeout; return this; } /** * Gets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @return the idle endpoint timeout */ public Duration getIdleEndpointTimeout() { return idleEndpointTimeout; } /** * Sets the idle endpoint timeout * * Default value is 1 hour. * * If there are no requests to a specific endpoint for idle endpoint timeout duration, * direct client closes all connections to that endpoint to save resources and I/O cost. * * @param idleEndpointTimeout the idle endpoint timeout * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setIdleEndpointTimeout(Duration idleEndpointTimeout) { this.idleEndpointTimeout = idleEndpointTimeout; return this; } /** * Gets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @return the max connections per endpoint */ public int getMaxConnectionsPerEndpoint() { return maxConnectionsPerEndpoint; } /** * Sets the max connections per endpoint * This represents the size of connection pool for a specific endpoint * * Default value is 30 * * @param maxConnectionsPerEndpoint the max connections per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxConnectionsPerEndpoint(int maxConnectionsPerEndpoint) { this.maxConnectionsPerEndpoint = maxConnectionsPerEndpoint; return this; } /** * Gets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @return the max 
requests per endpoint */ public int getMaxRequestsPerConnection() { return maxRequestsPerConnection; } /** * Sets the max requests per connection * This represents the number of requests that will be queued * on a single connection for a specific endpoint * * Default value is 10 * * @param maxRequestsPerConnection the max requests per endpoint * @return the {@link DirectConnectionConfig} */ public DirectConnectionConfig setMaxRequestsPerConnection(int maxRequestsPerConnection) { this.maxRequestsPerConnection = maxRequestsPerConnection; return this; } /** * Gets the request timeout interval * This represents the timeout interval for requests * * Default value is 60 seconds * * @return the request timeout interval */ Duration getRequestTimeout() { return requestTimeout; } /** * Sets the request timeout interval * This represents the timeout interval for requests * * Default value is 5 seconds * * @param requestTimeout the request timeout interval * @return the {@link DirectConnectionConfig} */ DirectConnectionConfig setRequestTimeout(Duration requestTimeout) { this.requestTimeout = requestTimeout; return this; } @Override public String toString() { return "DirectConnectionConfig{" + "connectTimeout=" + connectTimeout + ", idleConnectionTimeout=" + idleConnectionTimeout + ", idleEndpointTimeout=" + idleEndpointTimeout + ", maxConnectionsPerEndpoint=" + maxConnectionsPerEndpoint + ", maxRequestsPerConnection=" + maxRequestsPerConnection + '}'; } }
changed to instance of ClosedChannelException
public void onException(final RxDocumentServiceRequest request, Throwable exception) { checkNotNull(request, "expect non-null request"); checkNotNull(exception, "expect non-null exception"); if (exception instanceof GoneException) { final Throwable cause = exception.getCause(); if (cause != null) { if (cause instanceof IOException) { final Class<?> type = cause.getClass(); if (type == ClosedChannelException.class) { this.onConnectionEvent(RntbdConnectionEvent.READ_EOF, request, exception); } else { if (logger.isDebugEnabled()) { logger.debug("Will not raise the connection state change event for error {}", cause); } } } } } }
if (type == ClosedChannelException.class) {
public void onException(final RxDocumentServiceRequest request, Throwable exception) { checkNotNull(request, "expect non-null request"); checkNotNull(exception, "expect non-null exception"); if (exception instanceof GoneException) { final Throwable cause = exception.getCause(); if (cause != null) { if (cause instanceof IOException) { if (cause instanceof ClosedChannelException) { this.onConnectionEvent(RntbdConnectionEvent.READ_EOF, request, exception); } else { if (logger.isDebugEnabled()) { logger.debug("Will not raise the connection state change event for error {}", cause); } } } } } }
class RntbdConnectionStateListener { private static final Logger logger = LoggerFactory.getLogger(RntbdConnectionStateListener.class); private final IAddressResolver addressResolver; private final RntbdEndpoint endpoint; private final Set<PartitionKeyRangeIdentity> partitionAddressCache; private final AtomicBoolean updatingAddressCache = new AtomicBoolean(false); public RntbdConnectionStateListener(final IAddressResolver addressResolver, final RntbdEndpoint endpoint) { this.addressResolver = checkNotNull(addressResolver, "expected non-null addressResolver"); this.endpoint = checkNotNull(endpoint, "expected non-null endpoint"); this.partitionAddressCache = ConcurrentHashMap.newKeySet(); } public void updateConnectionState(final RxDocumentServiceRequest request) { checkNotNull("expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = this.getPartitionKeyRangeIdentity(request); checkNotNull(partitionKeyRangeIdentity, "expected non-null partitionKeyRangeIdentity"); this.partitionAddressCache.add(partitionKeyRangeIdentity); if (logger.isDebugEnabled()) { logger.debug( "updateConnectionState({\"time\":{},\"endpoint\":{},\"partitionKeyRangeIdentity\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(endpoint), RntbdObjectMapper.toJson(partitionKeyRangeIdentity)); } } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(final RxDocumentServiceRequest request) { checkNotNull(request, "expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = request.getPartitionKeyRangeIdentity(); if (partitionKeyRangeIdentity == null) { final String partitionKeyRange = checkNotNull( request.requestContext.resolvedPartitionKeyRange, "expected non-null resolvedPartitionKeyRange").getId(); final String collectionRid = request.requestContext.resolvedCollectionRid; partitionKeyRangeIdentity = collectionRid != null ? 
new PartitionKeyRangeIdentity(collectionRid, partitionKeyRange) : new PartitionKeyRangeIdentity(partitionKeyRange); } return partitionKeyRangeIdentity; } private void onConnectionEvent(final RntbdConnectionEvent event, final RxDocumentServiceRequest request, final Throwable exception) { checkNotNull(request, "expected non-null exception"); checkNotNull(exception, "expected non-null exception"); if (event == RntbdConnectionEvent.READ_EOF) { if (!this.endpoint.isClosed()) { if (logger.isDebugEnabled()) { logger.debug("onConnectionEvent({\"event\":{},\"time\":{},\"endpoint\":{},\"cause\":{})", event, RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(exception)); } this.updateAddressCache(request); } } } private void updateAddressCache(final RxDocumentServiceRequest request) { try{ if (this.updatingAddressCache.compareAndSet(false, true)) { if (logger.isDebugEnabled()) { logger.debug( "updateAddressCache ({\"time\":{},\"endpoint\":{},\"partitionAddressCache\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(this.partitionAddressCache)); } this.addressResolver.remove(request, this.partitionAddressCache); this.partitionAddressCache.clear(); } } finally { this.updatingAddressCache.set(false); } } }
class RntbdConnectionStateListener { private static final Logger logger = LoggerFactory.getLogger(RntbdConnectionStateListener.class); private final IAddressResolver addressResolver; private final RntbdEndpoint endpoint; private final Set<PartitionKeyRangeIdentity> partitionAddressCache; private final AtomicBoolean updatingAddressCache = new AtomicBoolean(false); public RntbdConnectionStateListener(final IAddressResolver addressResolver, final RntbdEndpoint endpoint) { this.addressResolver = checkNotNull(addressResolver, "expected non-null addressResolver"); this.endpoint = checkNotNull(endpoint, "expected non-null endpoint"); this.partitionAddressCache = ConcurrentHashMap.newKeySet(); } public void updateConnectionState(final RxDocumentServiceRequest request) { checkNotNull("expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = this.getPartitionKeyRangeIdentity(request); checkNotNull(partitionKeyRangeIdentity, "expected non-null partitionKeyRangeIdentity"); this.partitionAddressCache.add(partitionKeyRangeIdentity); if (logger.isDebugEnabled()) { logger.debug( "updateConnectionState({\"time\":{},\"endpoint\":{},\"partitionKeyRangeIdentity\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(endpoint), RntbdObjectMapper.toJson(partitionKeyRangeIdentity)); } } private PartitionKeyRangeIdentity getPartitionKeyRangeIdentity(final RxDocumentServiceRequest request) { checkNotNull(request, "expect non-null request"); PartitionKeyRangeIdentity partitionKeyRangeIdentity = request.getPartitionKeyRangeIdentity(); if (partitionKeyRangeIdentity == null) { final String partitionKeyRange = checkNotNull( request.requestContext.resolvedPartitionKeyRange, "expected non-null resolvedPartitionKeyRange").getId(); final String collectionRid = request.requestContext.resolvedCollectionRid; partitionKeyRangeIdentity = collectionRid != null ? 
new PartitionKeyRangeIdentity(collectionRid, partitionKeyRange) : new PartitionKeyRangeIdentity(partitionKeyRange); } return partitionKeyRangeIdentity; } private void onConnectionEvent(final RntbdConnectionEvent event, final RxDocumentServiceRequest request, final Throwable exception) { checkNotNull(request, "expected non-null exception"); checkNotNull(exception, "expected non-null exception"); if (event == RntbdConnectionEvent.READ_EOF) { if (!this.endpoint.isClosed()) { if (logger.isDebugEnabled()) { logger.debug("onConnectionEvent({\"event\":{},\"time\":{},\"endpoint\":{},\"cause\":{})", event, RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(exception)); } this.updateAddressCache(request); } } } private void updateAddressCache(final RxDocumentServiceRequest request) { try{ if (this.updatingAddressCache.compareAndSet(false, true)) { if (logger.isDebugEnabled()) { logger.debug( "updateAddressCache ({\"time\":{},\"endpoint\":{},\"partitionAddressCache\":{}})", RntbdObjectMapper.toJson(Instant.now()), RntbdObjectMapper.toJson(this.endpoint), RntbdObjectMapper.toJson(this.partitionAddressCache)); } this.addressResolver.remove(request, this.partitionAddressCache); this.partitionAddressCache.clear(); } } finally { this.updatingAddressCache.set(false); } } }
nit: we can use DESCRIPTION_FOR_SEARCH and likewise for Name, will look cleaner
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add("27b53eec-8ff4-4070-8900-fbeaabfd158a"); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(5); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> res = client.beginCreateSearch(createSearchOptions, duration); SyncPoller<PhoneNumberSearch, PhoneNumberSearch> sync = res.getSyncPoller(); sync.waitForCompletion(); PhoneNumberSearch testResult = sync.getFinalResult(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
.setDescription("testsearch20200014")
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription(SEARCH_OPTIONS_DESCRIPTION) .setDisplayName(SEARCH_OPTIONS_NAME) .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration); AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes = poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); PhoneNumberSearch testResult = asyncRes.getValue(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> 
locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> 
capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions 
createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new 
PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = 
this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
Can we read this from env variables?
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add("27b53eec-8ff4-4070-8900-fbeaabfd158a"); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(5); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> res = client.beginCreateSearch(createSearchOptions, duration); SyncPoller<PhoneNumberSearch, PhoneNumberSearch> sync = res.getSyncPoller(); sync.waitForCompletion(); PhoneNumberSearch testResult = sync.getFinalResult(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
phonePlanIds.add("27b53eec-8ff4-4070-8900-fbeaabfd158a");
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription(SEARCH_OPTIONS_DESCRIPTION) .setDisplayName(SEARCH_OPTIONS_NAME) .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration); AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes = poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); PhoneNumberSearch testResult = asyncRes.getValue(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> 
locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> 
capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions 
createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new 
PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = 
this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
You might want to poll sooner then that?
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add("27b53eec-8ff4-4070-8900-fbeaabfd158a"); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(5); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> res = client.beginCreateSearch(createSearchOptions, duration); SyncPoller<PhoneNumberSearch, PhoneNumberSearch> sync = res.getSyncPoller(); sync.waitForCompletion(); PhoneNumberSearch testResult = sync.getFinalResult(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
Duration duration = Duration.ofSeconds(5);
public void beginCreateSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription(SEARCH_OPTIONS_DESCRIPTION) .setDisplayName(SEARCH_OPTIONS_NAME) .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration); AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes = poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); PhoneNumberSearch testResult = asyncRes.getValue(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> 
locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> 
capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions 
createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new 
PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = 
this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
This checks one path of functionality (Create and Validate Response), but shouldn't we also test that the developer can poll and get the status? Another scenario would be cancelling the search itself.
/**
 * Verifies the search LRO end-to-end via the synchronous poller: starts a search for two
 * phone numbers, blocks until the operation completes, and validates the final result.
 */
public void beginCreateSearch() {
    // Build the search request: two numbers from a single phone plan in the target area code.
    List<String> phonePlanIds = new ArrayList<>();
    phonePlanIds.add("27b53eec-8ff4-4070-8900-fbeaabfd158a");
    CreateSearchOptions createSearchOptions = new CreateSearchOptions();
    createSearchOptions
        .setAreaCode(AREA_CODE_FOR_SEARCH)
        .setDescription("testsearch20200014")
        .setDisplayName("testsearch20200014")
        .setPhonePlanIds(phonePlanIds)
        .setQuantity(2);
    // Poll interval for the long-running operation.
    Duration duration = Duration.ofSeconds(5);
    PhoneNumberAsyncClient client = this.getClient();
    PollerFlux<PhoneNumberSearch, PhoneNumberSearch> res = client.beginCreateSearch(createSearchOptions, duration);
    // Block synchronously until the search operation finishes.
    SyncPoller<PhoneNumberSearch, PhoneNumberSearch> sync = res.getSyncPoller();
    sync.waitForCompletion();
    PhoneNumberSearch testResult = sync.getFinalResult();
    // Fail with a clear assertion (not an NPE) if the poller produced no result.
    assertNotNull(testResult);
    // JUnit convention: expected value first, actual second — the original call had them reversed,
    // which produces misleading "expected X but was Y" failure messages.
    assertEquals(2, testResult.getPhoneNumbers().size());
    assertNotNull(testResult.getSearchId());
}
PhoneNumberSearch testResult = sync.getFinalResult();
/**
 * Verifies the search LRO end-to-end via the async poller: starts a search for two phone
 * numbers, polls until the operation reports success, and validates the final result.
 */
public void beginCreateSearch() {
    // Build the search request: two numbers from a single phone plan in the target area code.
    List<String> phonePlanIds = new ArrayList<>();
    phonePlanIds.add(PHONE_PLAN_ID);
    CreateSearchOptions createSearchOptions = new CreateSearchOptions();
    createSearchOptions
        .setAreaCode(AREA_CODE_FOR_SEARCH)
        .setDescription(SEARCH_OPTIONS_DESCRIPTION)
        .setDisplayName(SEARCH_OPTIONS_NAME)
        .setPhonePlanIds(phonePlanIds)
        .setQuantity(2);
    // Poll interval for the long-running operation.
    Duration duration = Duration.ofSeconds(1);
    PhoneNumberAsyncClient client = this.getClient();
    PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration);
    // Poll until the LRO reports success, then take the last emitted response.
    AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes =
        poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
            .blockLast();
    // blockLast() returns null when the flux completes empty; fail with a clear
    // assertion rather than an opaque NullPointerException on the next line.
    assertNotNull(asyncRes);
    PhoneNumberSearch testResult = asyncRes.getValue();
    // JUnit convention: expected value first, actual second — the original call had them reversed.
    assertEquals(2, testResult.getPhoneNumbers().size());
    assertNotNull(testResult.getSearchId());
}
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> 
locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> 
capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions 
createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new 
PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = 
this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
If you call "getSyncPoller", your test is effectively synchronous. You might as well call the sync client version and move this test to the sync client test file instead. To test the async version, consider exercising the subscribe function of PollerFlux with a callback.
/**
 * Verifies that a phone-number search long-running operation completes and
 * yields the requested quantity of numbers plus a search id.
 *
 * <p>NOTE(review): this drives the async client's LRO through
 * {@code getSyncPoller()}, which makes the test synchronous; consider testing
 * the PollerFlux subscribe path instead (or moving this to the sync client
 * tests) — TODO confirm intended coverage.</p>
 */
public void beginCreateSearch() {
    // Search draws from a single phone plan.
    List<String> phonePlanIds = new ArrayList<>();
    phonePlanIds.add(PHONE_PLAN_ID);
    CreateSearchOptions createSearchOptions = new CreateSearchOptions();
    createSearchOptions
        .setAreaCode(AREA_CODE_FOR_SEARCH)
        .setDescription(SEARCH_OPTIONS_DESCRIPTION)
        .setDisplayName(SEARCH_OPTIONS_NAME)
        .setPhonePlanIds(phonePlanIds)
        .setQuantity(2);
    // Poll interval for the long-running search operation.
    Duration duration = Duration.ofSeconds(1);
    PhoneNumberAsyncClient client = this.getClient();
    SyncPoller<PhoneNumberSearch, PhoneNumberSearch> res =
        client.beginCreateSearch(createSearchOptions, duration).getSyncPoller();
    res.waitForCompletion();
    PhoneNumberSearch testResult = res.getFinalResult();
    // JUnit convention: expected value first, actual second (was swapped,
    // which made failure messages report the values backwards).
    assertEquals(2, testResult.getPhoneNumbers().size());
    assertNotNull(testResult.getSearchId());
}
client.beginCreateSearch(createSearchOptions, duration).getSyncPoller();
/**
 * Verifies that a phone-number search long-running operation, driven through
 * the async {@code PollerFlux}, reaches successful completion and yields the
 * requested quantity of numbers plus a search id.
 */
public void beginCreateSearch() {
    // Search draws from a single phone plan.
    List<String> phonePlanIds = new ArrayList<>();
    phonePlanIds.add(PHONE_PLAN_ID);
    CreateSearchOptions createSearchOptions = new CreateSearchOptions();
    createSearchOptions
        .setAreaCode(AREA_CODE_FOR_SEARCH)
        .setDescription(SEARCH_OPTIONS_DESCRIPTION)
        .setDisplayName(SEARCH_OPTIONS_NAME)
        .setPhonePlanIds(phonePlanIds)
        .setQuantity(2);
    // Poll interval for the long-running search operation.
    Duration duration = Duration.ofSeconds(1);
    PhoneNumberAsyncClient client = this.getClient();
    PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller =
        client.beginCreateSearch(createSearchOptions, duration);
    // Consume poll responses until the operation reports
    // SUCCESSFULLY_COMPLETED; blockLast() returns that final response.
    AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes =
        poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
        .blockLast();
    PhoneNumberSearch testResult = asyncRes.getValue();
    // NOTE: arguments here are (actual, expected) — reversed from the JUnit
    // convention; the assertion is still correct but failure output is swapped.
    assertEquals(testResult.getPhoneNumbers().size(), 2);
    assertNotNull(testResult.getSearchId());
}
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @Test() public void createAsyncPhoneNumberClientWithConnectionString() { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString().buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listAllPhoneNumbers() { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient().listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @Test() public void listPhonePlanGroups() { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient().listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @Test() public void listPhonePlans() { PagedFlux<PhonePlan> pagedFlux = this.getClient().listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @Test() public void listAllReleases() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSearches() { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient().listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @Test() public void listAllSupportedCountries() { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient().listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { 
assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @Test() public void getPhonePlanLocationOptions() { Mono<LocationOptionsResponse> mono = this.getClient().getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @Test() public void getAllAreaCodes() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient().getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void getAllAreaCodesWithResponse() { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient().getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @Test() public void updateCapabilities() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); 
update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient().updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void updateCapabilitiesWithResponse() { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient().updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdate() { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient().getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void getCapabilitiesUpdateWithResponse() { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient().getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @Test() public void createSearch() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) 
.setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient().createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @Test() public void createSearchWithResponse() { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient().createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void getSearchById() { Mono<PhoneNumberSearch> mono = this.getClient().getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @Test() public void getSearchByIdWithResponse() { Mono<Response<PhoneNumberSearch>> mono = this.getClient().getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @Test() public void purchaseSearch() { Mono<Void> mono = this.getClient().purchaseSearch(SEARCH_ID_TO_PURCHASE); StepVerifier.create(mono).verifyComplete(); } @Test() public void purchaseSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().purchaseSearchWithResponse(SEARCH_ID_TO_PURCHASE, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void 
cancelSearch() { Mono<Void> mono = this.getClient().cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @Test() public void cancelSearchWithResponse() { Mono<Response<Void>> mono = this.getClient().cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @Test() public void configureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Void> mono = this.getClient().configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @Test() public void configureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient().configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void getNumberConfiguration() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient().getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void getNumberConfigurationWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient().getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); 
assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @Test() public void unconfigureNumber() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient().unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); } @Test() public void unconfigureNumberWithResponse() { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient().unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @Test() public void releasePhoneNumbers() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<ReleaseResponse> mono = this.getClient().releasePhoneNumbers(phoneNumbers); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getReleaseId()); }) .verifyComplete(); } @Test() public void releasePhoneNumbersWithResponse() { List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(new PhoneNumber(PHONENUMBER_TO_RELEASE)); Mono<Response<ReleaseResponse>> mono = this.getClient().releasePhoneNumbersWithResponse(phoneNumbers, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getReleaseId()); }) .verifyComplete(); } @Test() private PhoneNumberAsyncClient getClient() { return super.getClientBuilder().buildAsyncClient(); } }
Is the idea to always set the poll interval in the samples?
/**
 * Code snippet for {@link FormRecognizerAsyncClient#beginRecognizeReceiptsFromUrl(String, RecognizeReceiptsOptions)}:
 * recognizes a US receipt from a URL with options (include field elements, locale, poll interval)
 * and prints the well-known fields of each recognized receipt page.
 */
public void beginRecognizeReceiptsFromUrlWithOptions() {
    String receiptUrl = "{receiptUrl}";
    boolean includeFieldElements = true;
    formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl,
        new RecognizeReceiptsOptions()
            .setFieldElementsIncluded(includeFieldElements)
            .setLocale("en-US")
            .setPollInterval(Duration.ofSeconds(5)))
        .flatMap(AsyncPollResponse::getFinalResult)
        .subscribe(recognizedReceipts -> {
            for (int i = 0; i < recognizedReceipts.size(); i++) {
                RecognizedForm recognizedReceipt = recognizedReceipts.get(i);
                Map<String, FormField> recognizedFields = recognizedReceipt.getFields();
                System.out.printf("----------- Recognized Receipt page %d -----------%n", i);
                // Every well-known receipt field is optional: check presence AND value type
                // before reading. The null/type checks are merged into single guards instead
                // of the original's redundant nested ifs (behavior is identical).
                FormField merchantNameField = recognizedFields.get("MerchantName");
                if (merchantNameField != null
                    && FieldValueType.STRING == merchantNameField.getValue().getValueType()) {
                    String merchantName = merchantNameField.getValue().asString();
                    System.out.printf("Merchant Name: %s, confidence: %.2f%n",
                        merchantName, merchantNameField.getConfidence());
                }
                FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber");
                if (merchantPhoneNumberField != null
                    && FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) {
                    // Renamed from 'merchantAddress': the value read here is a phone number, not an address.
                    String merchantPhoneNumber = merchantPhoneNumberField.getValue().asPhoneNumber();
                    System.out.printf("Merchant Phone number: %s, confidence: %.2f%n",
                        merchantPhoneNumber, merchantPhoneNumberField.getConfidence());
                }
                FormField transactionDateField = recognizedFields.get("TransactionDate");
                if (transactionDateField != null
                    && FieldValueType.DATE == transactionDateField.getValue().getValueType()) {
                    LocalDate transactionDate = transactionDateField.getValue().asDate();
                    System.out.printf("Transaction Date: %s, confidence: %.2f%n",
                        transactionDate, transactionDateField.getConfidence());
                }
                FormField receiptItemsField = recognizedFields.get("Items");
                if (receiptItemsField != null) {
                    // The header is printed as soon as the Items field exists — before the
                    // LIST type check — to preserve the original output ordering exactly.
                    System.out.printf("Receipt Items: %n");
                    if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) {
                        List<FormField> receiptItems = receiptItemsField.getValue().asList();
                        receiptItems.stream()
                            .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType())
                            .map(formField -> formField.getValue().asMap())
                            .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> {
                                if ("Quantity".equals(key)
                                    && FieldValueType.FLOAT == formField.getValue().getValueType()) {
                                    Float quantity = formField.getValue().asFloat();
                                    System.out.printf("Quantity: %f, confidence: %.2f%n",
                                        quantity, formField.getConfidence());
                                }
                            }));
                    }
                }
            }
        });
}
.setPollInterval(Duration.ofSeconds(5)))
/**
 * Code snippet for {@link FormRecognizerAsyncClient#beginRecognizeReceiptsFromUrl(String, RecognizeReceiptsOptions)}
 * with options: includes field elements, sets the "en-US" locale and a 5-second poll interval,
 * then prints the recognized fields of every receipt page.
 */
public void beginRecognizeReceiptsFromUrlWithOptions() {
    String receiptUrl = "{receiptUrl}";
    boolean includeFieldElements = true;
    formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl,
        new RecognizeReceiptsOptions()
            .setFieldElementsIncluded(includeFieldElements)
            .setLocale("en-US")
            .setPollInterval(Duration.ofSeconds(5)))
        .flatMap(AsyncPollResponse::getFinalResult)
        .subscribe(recognizedReceipts -> {
            for (int i = 0; i < recognizedReceipts.size(); i++) {
                RecognizedForm recognizedReceipt = recognizedReceipts.get(i);
                Map<String, FormField> recognizedFields = recognizedReceipt.getFields();
                System.out.printf("----------- Recognized Receipt page %d -----------%n", i);
                // Optional fields: a single guard replaces the original's redundant
                // nested null-then-type ifs; the observable behavior is unchanged.
                FormField merchantNameField = recognizedFields.get("MerchantName");
                if (merchantNameField != null
                    && FieldValueType.STRING == merchantNameField.getValue().getValueType()) {
                    String merchantName = merchantNameField.getValue().asString();
                    System.out.printf("Merchant Name: %s, confidence: %.2f%n",
                        merchantName, merchantNameField.getConfidence());
                }
                FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber");
                if (merchantPhoneNumberField != null
                    && FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) {
                    // Local renamed from the misleading 'merchantAddress' — it holds a phone number.
                    String merchantPhoneNumber = merchantPhoneNumberField.getValue().asPhoneNumber();
                    System.out.printf("Merchant Phone number: %s, confidence: %.2f%n",
                        merchantPhoneNumber, merchantPhoneNumberField.getConfidence());
                }
                FormField transactionDateField = recognizedFields.get("TransactionDate");
                if (transactionDateField != null
                    && FieldValueType.DATE == transactionDateField.getValue().getValueType()) {
                    LocalDate transactionDate = transactionDateField.getValue().asDate();
                    System.out.printf("Transaction Date: %s, confidence: %.2f%n",
                        transactionDate, transactionDateField.getConfidence());
                }
                FormField receiptItemsField = recognizedFields.get("Items");
                if (receiptItemsField != null) {
                    // Printed before the LIST type check, matching the original output order.
                    System.out.printf("Receipt Items: %n");
                    if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) {
                        List<FormField> receiptItems = receiptItemsField.getValue().asList();
                        receiptItems.stream()
                            .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType())
                            .map(formField -> formField.getValue().asMap())
                            .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> {
                                if ("Quantity".equals(key)
                                    && FieldValueType.FLOAT == formField.getValue().getValueType()) {
                                    Float quantity = formField.getValue().asFloat();
                                    System.out.printf("Quantity: %f, confidence: %.2f%n",
                                        quantity, formField.getConfidence());
                                }
                            }));
                    }
                }
            }
        });
}
class FormRecognizerAsyncClientJavaDocCodeSnippets { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder().buildAsyncClient(); /** * Code snippet for creating a {@link FormRecognizerAsyncClient} */ public void createFormRecognizerAsyncClient() { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); } /** * Code snippet for creating a {@link FormRecognizerAsyncClient} with pipeline */ public void createFormRecognizerAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(/* add policies */) .build(); FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .pipeline(pipeline) .buildAsyncClient(); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrl() { String formUrl = "{form_url}"; String modelId = "{custom_trained_model_id}"; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrlWithOptions() { String formUrl = "{formUrl}"; String modelId = "{model_id}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl, new RecognizeCustomFormsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(10))) 
.flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeCustomForms() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient * with options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeCustomFormsWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length(), new RecognizeCustomFormsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldName, formField) -> { System.out.printf("Field text: %s%n", fieldName); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeContentFromUrl() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options */ public void beginRecognizeContentFromUrlWithOptions() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl, new RecognizeContentOptions().setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page 
Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeContent() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeContentWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length(), new RecognizeContentOptions() .setContentType(FormContentType.APPLICATION_PDF) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeReceiptsFromUrl() { String receiptUrl = "{receiptUrl}"; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = 
merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeReceipts() throws IOException { File receipt = new File("{file_source_url}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { 
List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeReceiptsWithOptions() throws IOException { File receipt = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length(), new RecognizeReceiptsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = 
recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } }
class FormRecognizerAsyncClientJavaDocCodeSnippets { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder().buildAsyncClient(); /** * Code snippet for creating a {@link FormRecognizerAsyncClient} */ public void createFormRecognizerAsyncClient() { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); } /** * Code snippet for creating a {@link FormRecognizerAsyncClient} with pipeline */ public void createFormRecognizerAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(/* add policies */) .build(); FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .pipeline(pipeline) .buildAsyncClient(); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrl() { String formUrl = "{form_url}"; String modelId = "{custom_trained_model_id}"; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrlWithOptions() { String formUrl = "{formUrl}"; String modelId = "{model_id}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl, new RecognizeCustomFormsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(10))) 
.flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeCustomForms() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient * with options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeCustomFormsWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length(), new RecognizeCustomFormsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldName, formField) -> { System.out.printf("Field text: %s%n", fieldName); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeContentFromUrl() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options */ public void beginRecognizeContentFromUrlWithOptions() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl, new RecognizeContentOptions().setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page 
Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeContent() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeContentWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length(), new RecognizeContentOptions() .setContentType(FormContentType.APPLICATION_PDF) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeReceiptsFromUrl() { String receiptUrl = "{receiptUrl}"; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = 
merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeReceipts() throws IOException { File receipt = new File("{file_source_url}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { 
List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeReceiptsWithOptions() throws IOException { File receipt = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length(), new RecognizeReceiptsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = 
recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeBusinessCardsFromUrl() { String businessCardUrl = "{business_card_url}"; formRecognizerAsyncClient.beginRecognizeBusinessCardsFromUrl(businessCardUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> 
recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeBusinessCardsFromUrlWithOptions() { String businessCardUrl = "{business_card_url}"; boolean includeFieldElements = true; 
formRecognizerAsyncClient.beginRecognizeBusinessCardsFromUrl(businessCardUrl, new RecognizeBusinessCardsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedBusinessCard = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedBusinessCard.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == 
jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeBusinessCards() throws IOException { File businessCard = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(businessCard.toPath()))); formRecognizerAsyncClient.beginRecognizeBusinessCards(buffer, businessCard.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == 
contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for * {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeBusinessCardsWithOptions() throws IOException { File businessCard = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(businessCard.toPath()))); formRecognizerAsyncClient.beginRecognizeBusinessCards(buffer, businessCard.length(), new RecognizeBusinessCardsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> 
contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } }
I just wanted to showcase this feature and thought samples would be a good way to do it.
public void beginRecognizeReceiptsFromUrlWithOptions() { String receiptUrl = "{receiptUrl}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl, new RecognizeReceiptsOptions() .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedReceipt = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedReceipt.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == 
receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); }
.setPollInterval(Duration.ofSeconds(5)))
public void beginRecognizeReceiptsFromUrlWithOptions() { String receiptUrl = "{receiptUrl}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl, new RecognizeReceiptsOptions() .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedReceipt = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedReceipt.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == 
receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); }
class FormRecognizerAsyncClientJavaDocCodeSnippets { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder().buildAsyncClient(); /** * Code snippet for creating a {@link FormRecognizerAsyncClient} */ public void createFormRecognizerAsyncClient() { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); } /** * Code snippet for creating a {@link FormRecognizerAsyncClient} with pipeline */ public void createFormRecognizerAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(/* add policies */) .build(); FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .pipeline(pipeline) .buildAsyncClient(); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrl() { String formUrl = "{form_url}"; String modelId = "{custom_trained_model_id}"; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrlWithOptions() { String formUrl = "{formUrl}"; String modelId = "{model_id}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl, new RecognizeCustomFormsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(10))) 
.flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeCustomForms() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient * with options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeCustomFormsWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length(), new RecognizeCustomFormsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldName, formField) -> { System.out.printf("Field text: %s%n", fieldName); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeContentFromUrl() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options */ public void beginRecognizeContentFromUrlWithOptions() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl, new RecognizeContentOptions().setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page 
Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeContent() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeContentWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length(), new RecognizeContentOptions() .setContentType(FormContentType.APPLICATION_PDF) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeReceiptsFromUrl() { String receiptUrl = "{receiptUrl}"; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = 
merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeReceipts() throws IOException { File receipt = new File("{file_source_url}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { 
List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeReceiptsWithOptions() throws IOException { File receipt = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length(), new RecognizeReceiptsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = 
recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } }
class FormRecognizerAsyncClientJavaDocCodeSnippets { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder().buildAsyncClient(); /** * Code snippet for creating a {@link FormRecognizerAsyncClient} */ public void createFormRecognizerAsyncClient() { FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildAsyncClient(); } /** * Code snippet for creating a {@link FormRecognizerAsyncClient} with pipeline */ public void createFormRecognizerAsyncClientWithPipeline() { HttpPipeline pipeline = new HttpPipelineBuilder() .policies(/* add policies */) .build(); FormRecognizerAsyncClient formRecognizerAsyncClient = new FormRecognizerClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .pipeline(pipeline) .buildAsyncClient(); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrl() { String formUrl = "{form_url}"; String modelId = "{custom_trained_model_id}"; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeCustomFormsFromUrlWithOptions() { String formUrl = "{formUrl}"; String modelId = "{model_id}"; boolean includeFieldElements = true; formRecognizerAsyncClient.beginRecognizeCustomFormsFromUrl(modelId, formUrl, new RecognizeCustomFormsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(10))) 
.flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeCustomForms() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldText, formField) -> { System.out.printf("Field text: %s%n", fieldText); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for * {@link FormRecognizerAsyncClient * with options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeCustomFormsWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); String modelId = "{custom_trained_model_id}"; boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeCustomForms(modelId, buffer, form.length(), new RecognizeCustomFormsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(recognizedForm -> recognizedForm.getFields() .forEach((fieldName, formField) -> { System.out.printf("Field text: %s%n", fieldName); System.out.printf("Field value data text: %s%n", formField.getValueData().getText()); System.out.printf("Confidence score: %.2f%n", formField.getConfidence()); })); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeContentFromUrl() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options */ public void beginRecognizeContentFromUrlWithOptions() { String formUrl = "{formUrl}"; formRecognizerAsyncClient.beginRecognizeContentFromUrl(formUrl, new RecognizeContentOptions().setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page 
Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeContent() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length()) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeContentWithOptions() throws IOException { File form = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(form.toPath()))); formRecognizerAsyncClient.beginRecognizeContent(buffer, form.length(), new RecognizeContentOptions() .setContentType(FormContentType.APPLICATION_PDF) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .flatMap(Flux::fromIterable) .subscribe(formPage -> { System.out.printf("Page Angle: %s%n", formPage.getTextAngle()); System.out.printf("Page Dimension unit: %s%n", formPage.getUnit()); System.out.println("Recognized Tables: "); formPage.getTables().forEach(formTable -> formTable.getCells().forEach(recognizedTableCell -> System.out.printf("%s ", recognizedTableCell.getText()))); }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeReceiptsFromUrl() { String receiptUrl = "{receiptUrl}"; formRecognizerAsyncClient.beginRecognizeReceiptsFromUrl(receiptUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = 
merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. 
*/ public void beginRecognizeReceipts() throws IOException { File receipt = new File("{file_source_url}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { 
List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeReceiptsWithOptions() throws IOException { File receipt = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(receipt.toPath()))); formRecognizerAsyncClient.beginRecognizeReceipts(buffer, receipt.length(), new RecognizeReceiptsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setLocale("en-US") .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedReceipts -> { for (int i = 0; i < recognizedReceipts.size(); i++) { RecognizedForm recognizedForm = recognizedReceipts.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Receipt page %d -----------%n", i); FormField merchantNameField = recognizedFields.get("MerchantName"); if (merchantNameField != null) { if (FieldValueType.STRING == merchantNameField.getValue().getValueType()) { String merchantName = merchantNameField.getValue().asString(); System.out.printf("Merchant Name: %s, confidence: %.2f%n", merchantName, merchantNameField.getConfidence()); } } FormField merchantPhoneNumberField = 
recognizedFields.get("MerchantPhoneNumber"); if (merchantPhoneNumberField != null) { if (FieldValueType.PHONE_NUMBER == merchantPhoneNumberField.getValue().getValueType()) { String merchantAddress = merchantPhoneNumberField.getValue().asPhoneNumber(); System.out.printf("Merchant Phone number: %s, confidence: %.2f%n", merchantAddress, merchantPhoneNumberField.getConfidence()); } } FormField transactionDateField = recognizedFields.get("TransactionDate"); if (transactionDateField != null) { if (FieldValueType.DATE == transactionDateField.getValue().getValueType()) { LocalDate transactionDate = transactionDateField.getValue().asDate(); System.out.printf("Transaction Date: %s, confidence: %.2f%n", transactionDate, transactionDateField.getConfidence()); } } FormField receiptItemsField = recognizedFields.get("Items"); if (receiptItemsField != null) { System.out.printf("Receipt Items: %n"); if (FieldValueType.LIST == receiptItemsField.getValue().getValueType()) { List<FormField> receiptItems = receiptItemsField.getValue().asList(); receiptItems.stream() .filter(receiptItem -> FieldValueType.MAP == receiptItem.getValue().getValueType()) .map(formField -> formField.getValue().asMap()) .forEach(formFieldMap -> formFieldMap.forEach((key, formField) -> { if ("Quantity".equals(key)) { if (FieldValueType.FLOAT == formField.getValue().getValueType()) { Float quantity = formField.getValue().asFloat(); System.out.printf("Quantity: %f, confidence: %.2f%n", quantity, formField.getConfidence()); } } })); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient */ public void beginRecognizeBusinessCardsFromUrl() { String businessCardUrl = "{business_card_url}"; formRecognizerAsyncClient.beginRecognizeBusinessCardsFromUrl(businessCardUrl) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> 
recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for * {@link FormRecognizerAsyncClient */ public void beginRecognizeBusinessCardsFromUrlWithOptions() { String businessCardUrl = "{business_card_url}"; boolean includeFieldElements = true; 
formRecognizerAsyncClient.beginRecognizeBusinessCardsFromUrl(businessCardUrl, new RecognizeBusinessCardsOptions() .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedBusinessCard = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedBusinessCard.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == 
jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for {@link FormRecognizerAsyncClient * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeBusinessCards() throws IOException { File businessCard = new File("{local/file_path/fileName.jpg}"); Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(businessCard.toPath()))); formRecognizerAsyncClient.beginRecognizeBusinessCards(buffer, businessCard.length()) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == 
contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } /** * Code snippet for * {@link FormRecognizerAsyncClient * options * * @throws IOException Exception thrown when there is an error in reading all the bytes from the File. */ public void beginRecognizeBusinessCardsWithOptions() throws IOException { File businessCard = new File("{local/file_path/fileName.jpg}"); boolean includeFieldElements = true; Flux<ByteBuffer> buffer = toFluxByteBuffer(new ByteArrayInputStream(Files.readAllBytes(businessCard.toPath()))); formRecognizerAsyncClient.beginRecognizeBusinessCards(buffer, businessCard.length(), new RecognizeBusinessCardsOptions() .setContentType(FormContentType.IMAGE_JPEG) .setFieldElementsIncluded(includeFieldElements) .setPollInterval(Duration.ofSeconds(5))) .flatMap(AsyncPollResponse::getFinalResult) .subscribe(recognizedBusinessCards -> { for (int i = 0; i < recognizedBusinessCards.size(); i++) { RecognizedForm recognizedForm = recognizedBusinessCards.get(i); Map<String, FormField> recognizedFields = recognizedForm.getFields(); System.out.printf("----------- Recognized Business Card page %d -----------%n", i); FormField contactNamesFormField = recognizedFields.get("ContactNames"); if (contactNamesFormField != null) { if (FieldValueType.LIST == contactNamesFormField.getValue().getValueType()) { List<FormField> 
contactNamesList = contactNamesFormField.getValue().asList(); contactNamesList.stream() .filter(contactName -> FieldValueType.MAP == contactName.getValue().getValueType()) .map(contactName -> { System.out.printf("Contact name: %s%n", contactName.getValueData().getText()); return contactName.getValue().asMap(); }) .forEach(contactNamesMap -> contactNamesMap.forEach((key, contactName) -> { if ("FirstName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String firstName = contactName.getValue().asString(); System.out.printf("\tFirst Name: %s, confidence: %.2f%n", firstName, contactName.getConfidence()); } } if ("LastName".equals(key)) { if (FieldValueType.STRING == contactName.getValue().getValueType()) { String lastName = contactName.getValue().asString(); System.out.printf("\tLast Name: %s, confidence: %.2f%n", lastName, contactName.getConfidence()); } } })); } } FormField jobTitles = recognizedFields.get("JobTitles"); if (jobTitles != null) { if (FieldValueType.LIST == jobTitles.getValue().getValueType()) { List<FormField> jobTitlesItems = jobTitles.getValue().asList(); jobTitlesItems.stream().forEach(jobTitlesItem -> { if (FieldValueType.STRING == jobTitlesItem.getValue().getValueType()) { String jobTitle = jobTitlesItem.getValue().asString(); System.out.printf("Job Title: %s, confidence: %.2f%n", jobTitle, jobTitlesItem.getConfidence()); } }); } } } }); } }
Does something similar happen for the Reactor HTTP code paths as well?
/**
 * Called once a close operation is made on this channel.
 * <p>
 * Any requests still pending on the channel are failed (unless the channel is already closing
 * exceptionally, in which case they were failed earlier), the outbound side of an active TLS
 * session is shut down cleanly, and the close is propagated down the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} for which the close operation is made.
 * @param promise the {@link ChannelPromise} to be notified once the operation completes.
 */
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {

    this.traceOperation(context, "close");

    if (!this.closingExceptionally) {
        // Fail every in-flight request so callers are not left waiting on a closed channel.
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    } else {
        logger.debug("{} closed exceptionally", context);
    }

    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);

    if (sslHandler != null) {
        try {
            // Send the TLS close_notify alert so the peer can tear the session down cleanly.
            sslHandler.closeOutbound();
        } catch (Exception exception) {
            if (exception instanceof SSLException) {
                // BUGFIX: pass the throwable itself as the final SLF4J argument (not
                // exception.toString() through a "{}" placeholder) so the stack trace is logged.
                logger.debug(
                    "SslException when attempting to close the outbound SSL connection: ",
                    exception);
            } else {
                logger.warn(
                    "Exception when attempting to close the outbound SSL connection: ",
                    exception);
                throw exception;  // unexpected failure: surface it to the caller after logging
            }
        }
    }

    context.close(promise);
}
if (exception instanceof SSLException) {
/**
 * Handles a close operation made on this channel.
 * <p>
 * Fails whatever requests remain pending (unless an exceptional close already failed them),
 * shuts down the outbound half of any active TLS session, then forwards the close through
 * the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} on which the close was invoked.
 * @param promise the {@link ChannelPromise} notified when the close completes.
 */
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {

    this.traceOperation(context, "close");

    if (this.closingExceptionally) {
        // Pending requests were already completed exceptionally; nothing left to fail here.
        logger.debug("{} closed exceptionally", context);
    } else {
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    }

    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);

    if (sslHandler != null) {
        try {
            // Emit the TLS close_notify alert for an orderly shutdown of the session.
            sslHandler.closeOutbound();
        } catch (SSLException sslException) {
            // Expected during teardown of an already-broken TLS session; log quietly.
            logger.debug(
                "SslException when attempting to close the outbound SSL connection: ",
                sslException);
        } catch (Exception unexpected) {
            // Anything else is surprising: record it loudly and let it propagate.
            logger.warn(
                "Exception when attempting to close the outbound SSL connection: ",
                unexpected);
            throw unexpected;
        }
    }

    context.close(promise);
}
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
For ReactorNettyClient, Kushagra had some discussions with the owner of ReactorNettyClient - https://github.com/reactor/reactor-netty/issues/1165 My understanding from reading through the work item is that it addressed the issues - but I will double-check with @kushagraThapar when he is back next week
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: {}", exception.toString()); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: {}", exception.toString()); throw exception; } } } context.close(promise); }
if (exception instanceof SSLException) {
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
you don't need "{}" for exception and also exception.toString not needed here. ```suggestion logger.debug("SslException when attempting to close the outbound SSL connection", exception); ```
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: {}", exception.toString()); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: {}", exception.toString()); throw exception; } } } context.close(promise); }
exception.toString());
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
ditto.
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: {}", exception.toString()); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: {}", exception.toString()); throw exception; } } } context.close(promise); }
exception.toString());
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
/**
 * Manages the lifecycle of RNTBD requests on a single Netty {@link Channel}.
 * <p>
 * This duplex handler tracks every in-flight {@link RntbdRequestRecord} by transport request ID, negotiates the
 * RNTBD context on connection startup, correlates inbound {@link RntbdResponse} messages with their pending
 * requests, and fails all pending work when the channel closes, deregisters, or errors out.
 * <p>
 * Thread-safety: mutable state is either confined to the channel's event loop ({@code closingExceptionally},
 * {@code pendingWrites}) or uses concurrent types ({@code pendingRequests}, the context futures).
 */
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {

    // Pre-allocated exceptions with suppressed stack traces: these are thrown on hot shutdown paths where the
    // fill-in cost is wasted and the origin is known (see the third argument).
    private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");

    private static final ClosedChannelException ON_CLOSE =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");

    private static final ClosedChannelException ON_DEREGISTER =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");

    // Single shared executor on which request expirations run, so timeouts never execute on an event loop thread.
    private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
        "request-expirator",
        true,
        Thread.NORM_PRIORITY));

    private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);

    // Completed when the RNTBD context handshake response arrives (or fails).
    private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
    // Completed when the RNTBD context request has been written to the wire (or the write fails).
    private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
    private final ChannelHealthChecker healthChecker;
    private final int pendingRequestLimit;
    private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
    private final Timestamps timestamps = new Timestamps();

    // Set once by completeAllPendingRequestsExceptionally to make the shutdown path idempotent.
    private boolean closingExceptionally = false;
    // Buffers encoded requests written before the RNTBD context handshake completes; created in channelRegistered.
    private CoalescingBufferQueue pendingWrites;

    /**
     * Creates a request manager for a single channel.
     *
     * @param healthChecker       probes channel health when an {@link IdleStateEvent} fires; must not be null.
     * @param pendingRequestLimit maximum number of in-flight requests admitted by {@link #isServiceable(int)};
     *                            must be positive.
     */
    public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) {
        checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
        checkNotNull(healthChecker, "healthChecker");
        this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
        this.pendingRequestLimit = pendingRequestLimit;
        this.healthChecker = healthChecker;
    }

    /**
     * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerAdded(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerAdded");
    }

    /**
     * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
     * anymore.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerRemoved(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerRemoved");
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} is now active
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelActive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelActive");
        context.fireChannelActive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime
     * <p>
     * This method will only be called after the channel is closed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelInactive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelInactive");
        context.fireChannelInactive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
     * <p>
     * Only {@link RntbdResponse} messages are expected here; anything else is reported as an issue and fails the
     * channel. Reference-counted messages are always released before returning.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
     * @param message The message read.
     */
    @Override
    public void channelRead(final ChannelHandlerContext context, final Object message) {
        this.traceOperation(context, "channelRead");
        try {
            if (message.getClass() == RntbdResponse.class) {
                try {
                    this.messageReceived(context, (RntbdResponse) message);
                } catch (CorruptedFrameException error) {
                    this.exceptionCaught(context, error);
                } catch (Throwable throwable) {
                    reportIssue(context, "{} ", message, throwable);
                    this.exceptionCaught(context, throwable);
                }
            } else {
                final IllegalStateException error = new IllegalStateException(
                    lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message));
                reportIssue(context, "", error);
                this.exceptionCaught(context, error);
            }
        } finally {
            if (message instanceof ReferenceCounted) {
                boolean released = ((ReferenceCounted) message).release();
                reportIssueUnless(released, context, "failed to release message: {}", message);
            }
        }
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
     * <p>
     * If {@link ChannelOption#AUTO_READ} is off, no further attempt to read inbound data from the current
     * {@link Channel} will be made until {@link ChannelHandlerContext#read} is called. This leaves time for outbound
     * messages to be written.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelReadComplete(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelReadComplete");
        this.timestamps.channelReadCompleted();
        context.fireChannelReadComplete();
    }

    /**
     * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest}
     * <p>
     * This method then calls {@link ChannelHandlerContext#fireChannelRegistered} to forward to the next
     * {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
     * <p>
     * Sub-classes may override this method to change behavior.
     *
     * @param context the {@link ChannelHandlerContext} for which the bind operation is made
     */
    @Override
    public void channelRegistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelRegistered");
        reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
        this.pendingWrites = new CoalescingBufferQueue(context.channel());
        context.fireChannelRegistered();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelUnregistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelUnregistered");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
        } else {
            logger.debug("{} channelUnregistered exceptionally", context);
        }
        context.fireChannelUnregistered();
    }

    /**
     * Gets called once the writable state of a {@link Channel} changed. You can check the state with
     * {@link Channel#isWritable}.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelWritabilityChanged(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelWritabilityChanged");
        context.fireChannelWritabilityChanged();
    }

    /**
     * Processes exceptions raised by handlers in the pipeline: fails all pending requests and closes the channel
     * unless the exceptional-close path has already run.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param cause   Exception caught
     */
    @Override
    @SuppressWarnings("deprecation")
    public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
        this.traceOperation(context, "exceptionCaught", cause);
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, cause);
            logger.debug("{} closing due to:", context, cause);
            context.flush().close();
        }
    }

    /**
     * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline
     * <p>
     * All but inbound request management events are ignored.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param event   An object representing a user event
     */
    @Override
    public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
        this.traceOperation(context, "userEventTriggered", event);
        try {
            if (event instanceof IdleStateEvent) {
                // The channel has been idle; probe its health and fail the channel when the probe fails.
                this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
                    final Throwable cause;
                    if (future.isSuccess()) {
                        if (future.get()) {
                            return;
                        }
                        cause = UnhealthyChannelException.INSTANCE;
                    } else {
                        cause = future.cause();
                    }
                    this.exceptionCaught(context, cause);
                });
                return;
            }
            if (event instanceof RntbdContext) {
                // RNTBD handshake completed: release any writes buffered while negotiation was in flight.
                this.contextFuture.complete((RntbdContext) event);
                this.removeContextNegotiatorAndFlushPendingWrites(context);
                return;
            }
            if (event instanceof RntbdContextException) {
                this.contextFuture.completeExceptionally((RntbdContextException) event);
                context.pipeline().flush().close();
                return;
            }
            context.fireUserEventTriggered(event);
        } catch (Throwable error) {
            reportIssue(context, "{}: ", event, error);
            this.exceptionCaught(context, error);
        }
    }

    /**
     * Called once a bind operation is made.
     *
     * @param context      the {@link ChannelHandlerContext} for which the bind operation is made
     * @param localAddress the {@link SocketAddress} to which it should bound
     * @param promise      the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
        this.traceOperation(context, "bind", localAddress);
        context.bind(localAddress, promise);
    }

    /**
     * Called once a close operation is made.
     * <p>
     * FIX: the original source carried this method's Javadoc and a dangling {@code @Override} with no body, leaving
     * {@link ChannelOutboundHandler#close} unimplemented. The implementation is restored here: it fails all pending
     * requests (unless the exceptional-close path already ran), shuts down the outbound side of the TLS session when
     * an {@link SslHandler} is present, and then forwards the close through the pipeline.
     *
     * @param context the {@link ChannelHandlerContext} for which the close operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "close");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
        } else {
            logger.debug("{} closed exceptionally", context);
        }
        final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
        if (sslHandler != null) {
            try {
                sslHandler.closeOutbound();
            } catch (Exception exception) {
                if (exception instanceof SSLException) {
                    // Routine during teardown; log at debug (with stack trace) and continue closing.
                    logger.debug("SslException when attempting to close the outbound SSL connection: ", exception);
                } else {
                    logger.warn("Exception when attempting to close the outbound SSL connection: ", exception);
                    throw exception;
                }
            }
        }
        context.close(promise);
    }

    /**
     * Called once a connect operation is made.
     *
     * @param context       the {@link ChannelHandlerContext} for which the connect operation is made
     * @param remoteAddress the {@link SocketAddress} to which it should connect
     * @param localAddress  the {@link SocketAddress} which is used as source on connect
     * @param promise       the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void connect(
        final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
        final ChannelPromise promise
    ) {
        this.traceOperation(context, "connect", remoteAddress, localAddress);
        context.connect(remoteAddress, localAddress, promise);
    }

    /**
     * Called once a deregister operation is made from the current registered {@link EventLoop}.
     *
     * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "deregister");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
        } else {
            logger.debug("{} deregistered exceptionally", context);
        }
        context.deregister(promise);
    }

    /**
     * Called once a disconnect operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "disconnect");
        context.disconnect(promise);
    }

    /**
     * Called once a flush operation is made
     * <p>
     * The flush operation will try to flush out all previous written messages that are pending.
     *
     * @param context the {@link ChannelHandlerContext} for which the flush operation is made
     */
    @Override
    public void flush(final ChannelHandlerContext context) {
        this.traceOperation(context, "flush");
        context.flush();
    }

    /**
     * Intercepts {@link ChannelHandlerContext#read} operations.
     *
     * @param context the {@link ChannelHandlerContext} for which the read operation is made
     */
    @Override
    public void read(final ChannelHandlerContext context) {
        this.traceOperation(context, "read");
        context.read();
    }

    /**
     * Called once a write operation is made
     * <p>
     * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed
     * to the actual {@link Channel}. This will occur when {@link Channel#flush} is called.
     *
     * @param context the {@link ChannelHandlerContext} for which the write operation is made
     * @param message the message to write
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
        this.traceOperation(context, "write", message);
        if (message instanceof RntbdRequestRecord) {
            final RntbdRequestRecord record = (RntbdRequestRecord) message;
            this.timestamps.channelWriteAttempted();
            record.setSendingRequestHasStarted();
            // Register the record before it hits the wire so a fast response cannot race its bookkeeping.
            context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
                record.stage(RntbdRequestRecord.Stage.SENT);
                if (completed.isSuccess()) {
                    this.timestamps.channelWriteCompleted();
                }
            });
            return;
        }
        if (message == RntbdHealthCheckRequest.MESSAGE) {
            context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
                if (completed.isSuccess()) {
                    this.timestamps.channelPingCompleted();
                }
            });
            return;
        }
        final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message));
        reportIssue(context, "", error);
        this.exceptionCaught(context, error);
    }

    /** Returns the number of requests currently in flight on this channel. */
    int pendingRequestCount() {
        return this.pendingRequests.size();
    }

    /**
     * Returns the negotiated {@link RntbdContext}, or {@link Optional#empty()} when the handshake has not completed.
     * <p>
     * FIX: the original used {@code Optional.of(this.contextFuture.getNow(null))}, which throws
     * {@link NullPointerException} while the handshake is still pending; {@code Optional.ofNullable} lets callers
     * such as {@link #messageReceived} raise their intended {@link IllegalStateException} via {@code orElseThrow}.
     */
    Optional<RntbdContext> rntbdContext() {
        return Optional.ofNullable(this.contextFuture.getNow(null));
    }

    /** Future completed when the RNTBD context request has been written to the wire. */
    CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
        return this.contextRequestFuture;
    }

    /** True once the RNTBD context request has been sent. */
    boolean hasRequestedRntbdContext() {
        return this.contextRequestFuture.getNow(null) != null;
    }

    /** True once the RNTBD context handshake has completed successfully. */
    boolean hasRntbdContext() {
        return this.contextFuture.getNow(null) != null;
    }

    /**
     * Decides whether this channel can accept more work.
     * <p>
     * Before the handshake completes the limit is throttled to {@code demand}; afterwards the full
     * {@code pendingRequestLimit} applies.
     */
    boolean isServiceable(final int demand) {
        reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
        final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand);
        return this.pendingRequests.size() < limit;
    }

    /** Buffers an encoded request until the RNTBD context handshake completes. */
    void pendWrite(final ByteBuf out, final ChannelPromise promise) {
        this.pendingWrites.add(out, promise);
    }

    /** Returns a consistent snapshot of this channel's activity timestamps. */
    Timestamps snapshotTimestamps() {
        return new Timestamps(this.timestamps);
    }

    /**
     * Registers a request record under its transport request ID, arming an expiration timeout that is cancelled
     * when the record completes (for any reason).
     */
    private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
        return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
            reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record);
            record.pendingRequestQueueSize(pendingRequests.size());
            final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
                // Expire off the event loop so a slow expiration cannot stall channel I/O.
                requestExpirationExecutor.execute(record::expire);
            });
            record.whenComplete((response, error) -> {
                this.pendingRequests.remove(id);
                pendingRequestTimeout.cancel();
            });
            return record;
        });
    }

    /**
     * Fails every pending write and pending request with a {@link GoneException} derived from {@code throwable},
     * annotated with the phase of the RNTBD context handshake that failed (if any). Idempotent: subsequent calls
     * are suppressed via {@code closingExceptionally}.
     */
    private void completeAllPendingRequestsExceptionally(
        final ChannelHandlerContext context, final Throwable throwable
    ) {
        reportIssueUnless(!this.closingExceptionally, context, "", throwable);
        this.closingExceptionally = true;

        if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
            this.pendingWrites.releaseAndFailAll(context, throwable);
        }
        if (this.pendingRequests.isEmpty()) {
            return;
        }
        if (!this.contextRequestFuture.isDone()) {
            this.contextRequestFuture.completeExceptionally(throwable);
        }
        if (!this.contextFuture.isDone()) {
            this.contextFuture.completeExceptionally(throwable);
        }

        final int count = this.pendingRequests.size();
        Exception contextRequestException = null;
        String phrase = null;

        // Identify which handshake phase (if any) failed, to enrich the failure message below.
        if (this.contextRequestFuture.isCompletedExceptionally()) {
            try {
                this.contextRequestFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request write cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = new ChannelException(error);
            }
        } else if (this.contextFuture.isCompletedExceptionally()) {
            try {
                this.contextFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request read cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = new ChannelException(error);
            }
        } else {
            phrase = "closed exceptionally";
        }

        final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
        final Exception cause;

        if (throwable instanceof ClosedChannelException) {
            // Prefer the more specific handshake failure over a bare ClosedChannelException.
            cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException;
        } else {
            cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable);
        }

        for (RntbdRequestRecord record : this.pendingRequests.values()) {
            final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
            final String requestUri = record.args().physicalAddress().toString();
            final GoneException error = new GoneException(message, cause, null, requestUri);
            BridgeInternal.setRequestHeaders(error, requestHeaders);
            record.completeExceptionally(error);
        }
    }

    /**
     * This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
     *
     * @param context  {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
     * @param response the {@link RntbdResponse message} received.
     */
    private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {

        final Long transportRequestId = response.getTransportRequestId();

        if (transportRequestId == null) {
            reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
            return;
        }

        final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);

        if (requestRecord == null) {
            // The request likely expired or the channel failed before the response arrived.
            logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
            return;
        }

        requestRecord.responseLength(response.getMessageLength());
        requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED);

        final HttpResponseStatus status = response.getStatus();
        final UUID activityId = response.getActivityId();
        final int statusCode = status.code();

        if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) {
            // 2xx: complete the record with the store response.
            final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
            requestRecord.complete(storeResponse);
        } else {
            // Non-2xx: map the status (and, for GONE, the sub-status) to the matching CosmosException subtype.
            final CosmosException cause;
            final long lsn = response.getHeader(RntbdResponseHeader.LSN);
            final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
            final CosmosError error = response.hasPayload()
                ? new CosmosError(RntbdObjectMapper.readTree(response))
                : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());
            final Map<String, String> responseHeaders = response.getHeaders().asMap(
                this.rntbdContext().orElseThrow(IllegalStateException::new), activityId);

            switch (status.code()) {
                case StatusCodes.BADREQUEST:
                    cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.CONFLICT:
                    cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.FORBIDDEN:
                    cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.GONE:
                    final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                    switch (subStatusCode) {
                        case SubStatusCodes.COMPLETING_SPLIT:
                            cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                            cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.NAME_CACHE_IS_STALE:
                            cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                            cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        default:
                            cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                    }
                    break;
                case StatusCodes.INTERNAL_SERVER_ERROR:
                    cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.LOCKED:
                    cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.METHOD_NOT_ALLOWED:
                    cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.NOTFOUND:
                    cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.PRECONDITION_FAILED:
                    cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                    cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.REQUEST_TIMEOUT:
                    // Request timeouts are surfaced as GoneException (retriable) with the timeout as inner cause.
                    Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                    String resourceAddress = requestRecord.args().physicalAddress() != null
                        ? requestRecord.args().physicalAddress().toString()
                        : null;
                    cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                    break;
                case StatusCodes.RETRY_WITH:
                    cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.SERVICE_UNAVAILABLE:
                    cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.TOO_MANY_REQUESTS:
                    cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.UNAUTHORIZED:
                    cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                default:
                    cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders);
                    break;
            }

            requestRecord.completeExceptionally(cause);
        }
    }

    /**
     * Removes the RNTBD context negotiator from the pipeline once the handshake completes and flushes any writes
     * that were buffered while negotiation was in flight.
     */
    private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
        final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
        negotiator.removeInboundHandler();
        negotiator.removeOutboundHandler();
        if (!this.pendingWrites.isEmpty()) {
            this.pendingWrites.writeAndRemoveAll(context);
            context.flush();
        }
    }

    private static void reportIssue(final Object subject, final String format, final Object... args) {
        RntbdReporter.reportIssue(logger, subject, format, args);
    }

    private static void reportIssueUnless(
        final boolean predicate, final Object subject, final String format, final Object... args
    ) {
        RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
    }

    private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
        logger.debug("{}\n{}\n{}", operationName, context, args);
    }

    /**
     * Singleton marker exception used when an idle-state health probe reports an unhealthy channel.
     * <p>
     * {@link #fillInStackTrace} is overridden to skip stack capture: the instance is shared and its origin is fixed.
     */
    private static final class UnhealthyChannelException extends ChannelException {

        static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException();

        private UnhealthyChannelException() {
            super("health check failed");
        }

        @Override
        public Throwable fillInStackTrace() {
            return this;
        }
    }
}
Fixed: the close(ChannelHandlerContext, ChannelPromise) handler below now passes the caught exception object to the SLF4J logger (preserving its stack trace) instead of formatting exception.toString() into a "{}" placeholder in the log message.
/**
 * Called once a close operation is made.
 * <p>
 * Fails all pending requests (unless the exceptional-close path already ran), shuts down the outbound side of the
 * TLS session when an {@link SslHandler} is present, and then forwards the close through the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} for which the close operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
    this.traceOperation(context, "close");
    if (!this.closingExceptionally) {
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    } else {
        logger.debug("{} closed exceptionally", context);
    }
    final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
    if (sslHandler != null) {
        try {
            sslHandler.closeOutbound();
        } catch (Exception exception) {
            if (exception instanceof SSLException) {
                // FIX: pass the exception itself as the final logger argument so SLF4J records the stack trace;
                // the original formatted exception.toString() into a "{}" placeholder, discarding the trace.
                logger.debug(
                    "SslException when attempting to close the outbound SSL connection: ",
                    exception);
            } else {
                logger.warn(
                    "Exception when attempting to close the outbound SSL connection: ",
                    exception);
                throw exception;
            }
        }
    }
    context.close(promise);
}
exception.toString());
/**
 * Called once a close operation is made.
 * <p>
 * Fails any requests still pending on this channel, shuts down the outbound side of the TLS session when one
 * exists, and finally forwards the close to the next handler in the pipeline.
 *
 * @param context the {@link ChannelHandlerContext} for which the close operation is made
 * @param promise the {@link ChannelPromise} to notify once the operation completes
 */
public void close(final ChannelHandlerContext context, final ChannelPromise promise) {

    this.traceOperation(context, "close");

    if (this.closingExceptionally) {
        logger.debug("{} closed exceptionally", context);
    } else {
        this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
    }

    final SslHandler ssl = context.pipeline().get(SslHandler.class);

    if (ssl != null) {
        try {
            ssl.closeOutbound();
        } catch (Exception failure) {
            if (!(failure instanceof SSLException)) {
                // Unexpected failure: record it with its stack trace and let it propagate to the caller.
                logger.warn("Exception when attempting to close the outbound SSL connection: ", failure);
                throw failure;
            }
            // An SSLException here is routine during teardown; log at debug and continue closing.
            logger.debug("SslException when attempting to close the outbound SSL connection: ", failure);
        }
    }

    context.close(promise);
}
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
Fixed
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: {}", exception.toString()); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: {}", exception.toString()); throw exception; } } } context.close(promise); }
exception.toString());
public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. 
*/ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); this.timestamps.channelReadCompleted(); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); logger.debug("{} closing due to:", context, cause); context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = UnhealthyChannelException.INSTANCE; } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); context.pipeline().flush().close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. * * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override /** * Called once a connect operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. * * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. 
* * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; this.timestamps.channelWriteAttempted(); record.setSendingRequestHasStarted(); context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } int pendingRequestCount() { return this.pendingRequests.size(); } 
Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } boolean isServiceable(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); return this.pendingRequests.size() < limit; } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); record.pendingRequestQueueSize(pendingRequests.size()); final Timeout pendingRequestTimeout = record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); }); record.whenComplete((response, error) -> { this.pendingRequests.remove(id); pendingRequestTimeout.cancel(); }); return record; }); } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable ) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if 
(!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddress().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.responseLength(response.getMessageLength()); requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final 
CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); String resourceAddress = requestRecord.args().physicalAddress() != null ? requestRecord.args().physicalAddress().toString() : null; cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders); break; } requestRecord.completeExceptionally(cause); } } private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... 
args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.debug("{}\n{}\n{}", operationName, context, args); } private static final class UnhealthyChannelException extends ChannelException { static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException(); private UnhealthyChannelException() { super("health check failed"); } @Override public Throwable fillInStackTrace() { return this; } } }
/**
 * Manages the lifecycle of RNTBD requests flowing over a single Netty channel.
 * <p>
 * This duplex handler tracks pending {@link RntbdRequestRecord} instances, negotiates the RNTBD
 * context, runs health checks on idle-state events, and correlates each inbound
 * {@link RntbdResponse} with the request that produced it.
 */
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {

    // Pre-built exceptions with suppressed stack traces, one per shutdown path, so pending
    // requests can be failed cheaply and the failure site is identifiable from the tag.
    private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");

    private static final ClosedChannelException ON_CLOSE =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");

    private static final ClosedChannelException ON_DEREGISTER =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");

    // Single daemon executor shared by all channels for expiring timed-out requests.
    private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
        "request-expirator",
        true,
        Thread.NORM_PRIORITY));

    private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);

    private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
    private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
    private final ChannelHealthChecker healthChecker;
    private final int pendingRequestLimit;
    private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
    private final Timestamps timestamps = new Timestamps();

    // Guards against failing the pending requests twice; only touched on the channel's event loop.
    private boolean closingExceptionally = false;
    private CoalescingBufferQueue pendingWrites;

    /**
     * Creates a request manager for a single channel.
     *
     * @param healthChecker checker consulted when an {@link IdleStateEvent} fires; must not be null.
     * @param pendingRequestLimit maximum number of in-flight requests; must be positive.
     */
    public RntbdRequestManager(final ChannelHealthChecker healthChecker, final int pendingRequestLimit) {
        checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
        checkNotNull(healthChecker, "healthChecker");
        this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
        this.pendingRequestLimit = pendingRequestLimit;
        this.healthChecker = healthChecker;
    }

    /**
     * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerAdded(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerAdded");
    }

    /**
     * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
     * anymore.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerRemoved(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerRemoved");
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} is now active.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelActive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelActive");
        context.fireChannelActive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has reached the end of its lifetime.
     * <p>
     * This method will only be called after the channel is closed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelInactive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelInactive");
        context.fireChannelInactive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
     * <p>
     * Only {@link RntbdResponse} messages are expected; anything else is reported as an issue and
     * treated as a channel-level failure. Reference-counted messages are always released.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
     * @param message The message read.
     */
    @Override
    public void channelRead(final ChannelHandlerContext context, final Object message) {
        this.traceOperation(context, "channelRead");
        try {
            if (message.getClass() == RntbdResponse.class) {
                try {
                    this.messageReceived(context, (RntbdResponse) message);
                } catch (CorruptedFrameException error) {
                    this.exceptionCaught(context, error);
                } catch (Throwable throwable) {
                    reportIssue(context, "{} ", message, throwable);
                    this.exceptionCaught(context, throwable);
                }
            } else {
                final IllegalStateException error = new IllegalStateException(
                    lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message));
                reportIssue(context, "", error);
                this.exceptionCaught(context, error);
            }
        } finally {
            if (message instanceof ReferenceCounted) {
                boolean released = ((ReferenceCounted) message).release();
                reportIssueUnless(released, context, "failed to release message: {}", message);
            }
        }
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelReadComplete(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelReadComplete");
        this.timestamps.channelReadCompleted();
        context.fireChannelReadComplete();
    }

    /**
     * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until the
     * {@link RntbdContext} negotiation completes, then forwards the event to the next
     * {@link ChannelInboundHandler} in the {@link ChannelPipeline}.
     *
     * @param context the {@link ChannelHandlerContext} for which the registration is made
     */
    @Override
    public void channelRegistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelRegistered");
        reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
        this.pendingWrites = new CoalescingBufferQueue(context.channel());
        context.fireChannelRegistered();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelUnregistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelUnregistered");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
        } else {
            logger.debug("{} channelUnregistered exceptionally", context);
        }
        context.fireChannelUnregistered();
    }

    /**
     * Gets called once the writable state of a {@link Channel} changed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelWritabilityChanged(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelWritabilityChanged");
        context.fireChannelWritabilityChanged();
    }

    /**
     * Fails all pending requests with the given cause, then flushes and closes the channel.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param cause Exception caught
     */
    @Override
    @SuppressWarnings("deprecation")
    public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
        this.traceOperation(context, "exceptionCaught", cause);
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, cause);
            logger.debug("{} closing due to:", context, cause);
            context.flush().close();
        }
    }

    /**
     * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline.
     * <p>
     * Handles idle-state health checks and RNTBD context negotiation results; all other events are
     * forwarded unchanged.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param event An object representing a user event
     */
    @Override
    public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
        this.traceOperation(context, "userEventTriggered", event);
        try {
            if (event instanceof IdleStateEvent) {
                this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
                    final Throwable cause;
                    if (future.isSuccess()) {
                        if (future.get()) {
                            return;
                        }
                        cause = UnhealthyChannelException.INSTANCE;
                    } else {
                        cause = future.cause();
                    }
                    this.exceptionCaught(context, cause);
                });
                return;
            }
            if (event instanceof RntbdContext) {
                this.contextFuture.complete((RntbdContext) event);
                this.removeContextNegotiatorAndFlushPendingWrites(context);
                return;
            }
            if (event instanceof RntbdContextException) {
                this.contextFuture.completeExceptionally((RntbdContextException) event);
                context.pipeline().flush().close();
                return;
            }
            context.fireUserEventTriggered(event);
        } catch (Throwable error) {
            reportIssue(context, "{}: ", event, error);
            this.exceptionCaught(context, error);
        }
    }

    /**
     * Called once a bind operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the bind operation is made
     * @param localAddress the {@link SocketAddress} to which it should bound
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
        this.traceOperation(context, "bind", localAddress);
        context.bind(localAddress, promise);
    }

    /**
     * Called once a close operation is made.
     * <p>
     * NOTE(review): this override was missing from the source, leaving the {@code close} member
     * required by {@link ChannelOutboundHandler} unimplemented, its javadoc orphaned, and the
     * {@code ON_CLOSE} constant unused. The body below follows the pattern established by
     * {@link #deregister} and {@link #channelUnregistered} -- confirm against the upstream
     * implementation.
     *
     * @param context the {@link ChannelHandlerContext} for which the close operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "close");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
        } else {
            logger.debug("{} closed exceptionally", context);
        }
        context.close(promise);
    }

    /**
     * Called once a connect operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the connect operation is made
     * @param remoteAddress the {@link SocketAddress} to which it should connect
     * @param localAddress the {@link SocketAddress} which is used as source on connect
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void connect(
        final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress,
        final ChannelPromise promise
    ) {
        this.traceOperation(context, "connect", remoteAddress, localAddress);
        context.connect(remoteAddress, localAddress, promise);
    }

    /**
     * Called once a deregister operation is made from the current registered {@link EventLoop}.
     *
     * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "deregister");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
        } else {
            logger.debug("{} deregistered exceptionally", context);
        }
        context.deregister(promise);
    }

    /**
     * Called once a disconnect operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "disconnect");
        context.disconnect(promise);
    }

    /**
     * Called once a flush operation is made.
     * <p>
     * The flush operation will try to flush out all previous written messages that are pending.
     *
     * @param context the {@link ChannelHandlerContext} for which the flush operation is made
     */
    @Override
    public void flush(final ChannelHandlerContext context) {
        this.traceOperation(context, "flush");
        context.flush();
    }

    /**
     * Intercepts read requests and forwards them down the pipeline.
     *
     * @param context the {@link ChannelHandlerContext} for which the read operation is made
     */
    @Override
    public void read(final ChannelHandlerContext context) {
        this.traceOperation(context, "read");
        context.read();
    }

    /**
     * Called once a write operation is made.
     * <p>
     * {@link RntbdRequestRecord} messages are registered as pending before being written; the
     * health-check sentinel is passed through with ping bookkeeping. Any other message type is
     * reported as an issue and treated as a channel-level failure.
     *
     * @param context the {@link ChannelHandlerContext} for which the write operation is made
     * @param message the message to write
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
        this.traceOperation(context, "write", message);

        if (message instanceof RntbdRequestRecord) {
            final RntbdRequestRecord record = (RntbdRequestRecord) message;
            this.timestamps.channelWriteAttempted();
            record.setSendingRequestHasStarted();

            context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
                record.stage(RntbdRequestRecord.Stage.SENT);
                if (completed.isSuccess()) {
                    this.timestamps.channelWriteCompleted();
                }
            });
            return;
        }

        if (message == RntbdHealthCheckRequest.MESSAGE) {
            context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
                if (completed.isSuccess()) {
                    this.timestamps.channelPingCompleted();
                }
            });
            return;
        }

        final IllegalStateException error = new IllegalStateException(
            lenientFormat("message of %s: %s", message.getClass(), message));
        reportIssue(context, "", error);
        this.exceptionCaught(context, error);
    }

    int pendingRequestCount() {
        return this.pendingRequests.size();
    }

    Optional<RntbdContext> rntbdContext() {
        // Fixed: Optional.of(null) throws NullPointerException whenever the RNTBD context has not
        // yet been received; ofNullable yields Optional.empty() so callers such as
        // messageReceived() can raise the intended IllegalStateException via orElseThrow.
        return Optional.ofNullable(this.contextFuture.getNow(null));
    }

    CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
        return this.contextRequestFuture;
    }

    boolean hasRequestedRntbdContext() {
        return this.contextRequestFuture.getNow(null) != null;
    }

    boolean hasRntbdContext() {
        return this.contextFuture.getNow(null) != null;
    }

    /**
     * Indicates whether this channel can accept another request.
     * <p>
     * Before the RNTBD context arrives the limit is additionally capped by {@code demand}.
     *
     * @param demand number of requests the caller intends to submit
     * @return true if the pending-request count is below the applicable limit
     */
    boolean isServiceable(final int demand) {
        reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
        final int limit = this.hasRntbdContext()
            ? this.pendingRequestLimit
            : Math.min(this.pendingRequestLimit, demand);
        return this.pendingRequests.size() < limit;
    }

    void pendWrite(final ByteBuf out, final ChannelPromise promise) {
        this.pendingWrites.add(out, promise);
    }

    Timestamps snapshotTimestamps() {
        return new Timestamps(this.timestamps);
    }

    /**
     * Registers a request record as pending, arming its expiration timeout and removing it from
     * the map when it completes.
     */
    private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
        return this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
            // Fixed: the format string declares three placeholders but only "record" was passed.
            reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
            record.pendingRequestQueueSize(pendingRequests.size());
            final Timeout pendingRequestTimeout = record.newTimeout(timeout -> {
                requestExpirationExecutor.execute(record::expire);
            });
            record.whenComplete((response, error) -> {
                this.pendingRequests.remove(id);
                pendingRequestTimeout.cancel();
            });
            return record;
        });
    }

    /**
     * Fails every pending write and request with a {@link GoneException} derived from the given
     * cause, recording why the RNTBD context negotiation failed (if it did) in the message.
     */
    private void completeAllPendingRequestsExceptionally(
        final ChannelHandlerContext context, final Throwable throwable
    ) {
        reportIssueUnless(!this.closingExceptionally, context, "", throwable);
        this.closingExceptionally = true;

        if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
            this.pendingWrites.releaseAndFailAll(context, throwable);
        }

        if (this.pendingRequests.isEmpty()) {
            return;
        }

        if (!this.contextRequestFuture.isDone()) {
            this.contextRequestFuture.completeExceptionally(throwable);
        }

        if (!this.contextFuture.isDone()) {
            this.contextFuture.completeExceptionally(throwable);
        }

        final int count = this.pendingRequests.size();
        Exception contextRequestException = null;
        String phrase = null;

        if (this.contextRequestFuture.isCompletedExceptionally()) {
            try {
                this.contextRequestFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request write cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = new ChannelException(error);
            }
        } else if (this.contextFuture.isCompletedExceptionally()) {
            try {
                this.contextFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request read cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = new ChannelException(error);
            }
        } else {
            phrase = "closed exceptionally";
        }

        final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
        final Exception cause;

        if (throwable instanceof ClosedChannelException) {
            cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException;
        } else {
            cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable);
        }

        for (RntbdRequestRecord record : this.pendingRequests.values()) {
            final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
            final String requestUri = record.args().physicalAddress().toString();
            final GoneException error = new GoneException(message, cause, null, requestUri);
            BridgeInternal.setRequestHeaders(error, requestHeaders);
            record.completeExceptionally(error);
        }
    }

    /**
     * This method is called for each incoming message of type {@link RntbdResponse} to complete a request.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs.
     * @param response the {@link RntbdResponse message} received.
     */
    private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) {

        final Long transportRequestId = response.getTransportRequestId();

        if (transportRequestId == null) {
            reportIssue(context, "response ignored because its transportRequestId is missing: {}", response);
            return;
        }

        final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId);

        if (requestRecord == null) {
            // The record may already have expired or the channel may be shutting down.
            logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response);
            return;
        }

        requestRecord.responseLength(response.getMessageLength());
        requestRecord.stage(RntbdRequestRecord.Stage.RECEIVED);

        final HttpResponseStatus status = response.getStatus();
        final UUID activityId = response.getActivityId();
        final int statusCode = status.code();

        if (HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) {
            final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null));
            requestRecord.complete(storeResponse);
        } else {
            // Map the RNTBD status (and sub-status, for GONE) onto the corresponding
            // CosmosException subtype so upper layers can apply their retry policies.
            final CosmosException cause;
            final long lsn = response.getHeader(RntbdResponseHeader.LSN);
            final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId);
            final CosmosError error = response.hasPayload()
                ? new CosmosError(RntbdObjectMapper.readTree(response))
                : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name());

            final Map<String, String> responseHeaders = response.getHeaders().asMap(
                this.rntbdContext().orElseThrow(IllegalStateException::new), activityId
            );

            switch (status.code()) {
                case StatusCodes.BADREQUEST:
                    cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.CONFLICT:
                    cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.FORBIDDEN:
                    cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.GONE:
                    final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus));
                    switch (subStatusCode) {
                        case SubStatusCodes.COMPLETING_SPLIT:
                            cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.COMPLETING_PARTITION_MIGRATION:
                            cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.NAME_CACHE_IS_STALE:
                            cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        case SubStatusCodes.PARTITION_KEY_RANGE_GONE:
                            cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                        default:
                            cause = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders);
                            break;
                    }
                    break;
                case StatusCodes.INTERNAL_SERVER_ERROR:
                    cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.LOCKED:
                    cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.METHOD_NOT_ALLOWED:
                    cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.NOTFOUND:
                    cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.PRECONDITION_FAILED:
                    cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.REQUEST_ENTITY_TOO_LARGE:
                    cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.REQUEST_TIMEOUT:
                    // Request timeouts are surfaced as GoneException wrapping the timeout so the
                    // retry layer treats the replica as unreachable.
                    Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders);
                    String resourceAddress = requestRecord.args().physicalAddress() != null
                        ? requestRecord.args().physicalAddress().toString()
                        : null;
                    cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner);
                    break;
                case StatusCodes.RETRY_WITH:
                    cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.SERVICE_UNAVAILABLE:
                    cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.TOO_MANY_REQUESTS:
                    cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                case StatusCodes.UNAUTHORIZED:
                    cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders);
                    break;
                default:
                    cause = BridgeInternal.createCosmosException(status.code(), error, responseHeaders);
                    break;
            }

            requestRecord.completeExceptionally(cause);
        }
    }

    /**
     * Removes the context negotiator from the pipeline once the RNTBD context has been received
     * and flushes any writes that were buffered while negotiation was in progress.
     */
    private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
        final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
        negotiator.removeInboundHandler();
        negotiator.removeOutboundHandler();
        if (!this.pendingWrites.isEmpty()) {
            this.pendingWrites.writeAndRemoveAll(context);
            context.flush();
        }
    }

    private static void reportIssue(final Object subject, final String format, final Object... args) {
        RntbdReporter.reportIssue(logger, subject, format, args);
    }

    private static void reportIssueUnless(
        final boolean predicate, final Object subject, final String format, final Object... args
    ) {
        RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
    }

    private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
        logger.debug("{}\n{}\n{}", operationName, context, args);
    }

    /**
     * Singleton failure used when a health check on an idle channel reports unhealthy; the stack
     * trace is suppressed because the failure site carries no useful information.
     */
    private static final class UnhealthyChannelException extends ChannelException {

        static final UnhealthyChannelException INSTANCE = new UnhealthyChannelException();

        private UnhealthyChannelException() {
            super("health check failed");
        }

        @Override
        public Throwable fillInStackTrace() {
            // Suppress stack-trace capture: this exception is a shared sentinel.
            return this;
        }
    }
}
q - what is the default value of `OffsetDateTime`?
/**
 * Publishes a telemetry message for a digital twin.
 *
 * @param digitalTwinId the id of the digital twin to publish telemetry for.
 * @param messageId a unique id for the message; a random UUID is generated when null or empty.
 * @param payload the telemetry payload to publish.
 * @param options optional request settings (traceparent, tracestate, timestamp); may be null.
 * @param context the pipeline context for this request.
 * @return a {@link Mono} containing the service response with no body.
 */
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload,
    DigitalTwinsSendTelemetryOptions options, Context context) {
    // The service requires a message id; generate one when the caller omits it.
    if (messageId == null || messageId.isEmpty()) {
        messageId = UUID.randomUUID().toString();
    }
    com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions;
    if (options == null) {
        // A fresh options object supplies the default timestamp (current UTC time, per the
        // options class default -- see the review discussion).
        options = new DigitalTwinsSendTelemetryOptions();
        protocolLayerOptions = null;
    } else {
        protocolLayerOptions =
            new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions()
                .setTraceparent(options.getTraceparent())
                .setTracestate(options.getTracestate());
    }
    // Fixed: a caller-supplied options object whose timestamp was explicitly set to null used to
    // cause a NullPointerException on toString(); fall back to the current UTC time instead.
    java.time.OffsetDateTime timestamp = options.getTimestamp() != null
        ? options.getTimestamp()
        : java.time.OffsetDateTime.now(java.time.ZoneOffset.UTC);
    return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync(
        digitalTwinId,
        messageId,
        payload,
        timestamp.toString(),
        protocolLayerOptions,
        context);
}
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
This feels like another parameter for which we should send a default value (the current timestamp) if the user doesn't supply one.
/**
 * Publishes a telemetry message for a digital twin.
 *
 * @param digitalTwinId the id of the digital twin to publish telemetry for.
 * @param messageId a unique id for the message; a random UUID is generated when null or empty.
 * @param payload the telemetry payload to publish.
 * @param options optional request settings (traceparent, tracestate, timestamp); may be null.
 * @param context the pipeline context for this request.
 * @return a {@link Mono} containing the service response with no body.
 */
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload,
    DigitalTwinsSendTelemetryOptions options, Context context) {
    // The service requires a message id; generate one when the caller omits it.
    if (messageId == null || messageId.isEmpty()) {
        messageId = UUID.randomUUID().toString();
    }
    com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions;
    if (options == null) {
        // A fresh options object supplies the default timestamp (current UTC time, per the
        // options class default -- see the review discussion).
        options = new DigitalTwinsSendTelemetryOptions();
        protocolLayerOptions = null;
    } else {
        protocolLayerOptions =
            new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions()
                .setTraceparent(options.getTraceparent())
                .setTracestate(options.getTracestate());
    }
    // Fixed: a caller-supplied options object whose timestamp was explicitly set to null used to
    // cause a NullPointerException on toString(); fall back to the current UTC time instead.
    java.time.OffsetDateTime timestamp = options.getTimestamp() != null
        ? options.getTimestamp()
        : java.time.OffsetDateTime.now(java.time.ZoneOffset.UTC);
    return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync(
        digitalTwinId,
        messageId,
        payload,
        timestamp.toString(),
        protocolLayerOptions,
        context);
}
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
```java OffsetDateTime.now(ZoneOffset.UTC); ```
/**
 * Publishes a telemetry message for a digital twin.
 *
 * @param digitalTwinId the id of the digital twin to publish telemetry for.
 * @param messageId a unique id for the message; a random UUID is generated when null or empty.
 * @param payload the telemetry payload to publish.
 * @param options optional request settings (traceparent, tracestate, timestamp); may be null.
 * @param context the pipeline context for this request.
 * @return a {@link Mono} containing the service response with no body.
 */
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload,
    DigitalTwinsSendTelemetryOptions options, Context context) {
    // The service requires a message id; generate one when the caller omits it.
    if (messageId == null || messageId.isEmpty()) {
        messageId = UUID.randomUUID().toString();
    }
    com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions;
    if (options == null) {
        // A fresh options object supplies the default timestamp (current UTC time, per the
        // options class default -- see the review discussion).
        options = new DigitalTwinsSendTelemetryOptions();
        protocolLayerOptions = null;
    } else {
        protocolLayerOptions =
            new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions()
                .setTraceparent(options.getTraceparent())
                .setTracestate(options.getTracestate());
    }
    // Fixed: a caller-supplied options object whose timestamp was explicitly set to null used to
    // cause a NullPointerException on toString(); fall back to the current UTC time instead.
    java.time.OffsetDateTime timestamp = options.getTimestamp() != null
        ? options.getTimestamp()
        : java.time.OffsetDateTime.now(java.time.ZoneOffset.UTC);
    return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync(
        digitalTwinId,
        messageId,
        payload,
        timestamp.toString(),
        protocolLayerOptions,
        context);
}
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
It's possible that we could make the default value null, and just not send that header though
/**
 * Publishes a telemetry message for a digital twin.
 *
 * @param digitalTwinId the id of the digital twin to publish telemetry for.
 * @param messageId a unique id for the message; a random UUID is generated when null or empty.
 * @param payload the telemetry payload to publish.
 * @param options optional request settings (traceparent, tracestate, timestamp); may be null.
 * @param context the pipeline context for this request.
 * @return a {@link Mono} containing the service response with no body.
 */
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload,
    DigitalTwinsSendTelemetryOptions options, Context context) {
    // The service requires a message id; generate one when the caller omits it.
    if (messageId == null || messageId.isEmpty()) {
        messageId = UUID.randomUUID().toString();
    }
    com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions;
    if (options == null) {
        // A fresh options object supplies the default timestamp (current UTC time, per the
        // options class default -- see the review discussion).
        options = new DigitalTwinsSendTelemetryOptions();
        protocolLayerOptions = null;
    } else {
        protocolLayerOptions =
            new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions()
                .setTraceparent(options.getTraceparent())
                .setTracestate(options.getTracestate());
    }
    // Fixed: a caller-supplied options object whose timestamp was explicitly set to null used to
    // cause a NullPointerException on toString(); fall back to the current UTC time instead.
    java.time.OffsetDateTime timestamp = options.getTimestamp() != null
        ? options.getTimestamp()
        : java.time.OffsetDateTime.now(java.time.ZoneOffset.UTC);
    return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync(
        digitalTwinId,
        messageId,
        payload,
        timestamp.toString(),
        protocolLayerOptions,
        context);
}
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
Oh, I actually prefer sending the current time as default value. Why do you feel sending null is better?
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload, DigitalTwinsSendTelemetryOptions options, Context context) { if (messageId == null || messageId.isEmpty()) { messageId = UUID.randomUUID().toString(); } com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions; if (options == null) { options = new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); } return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync( digitalTwinId, messageId, payload, options.getTimestamp().toString(), protocolLayerOptions, context); }
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
I prefer sending the timestamp header with the default value by default, not omitting the header. The current implementation sends the timestamp by default. The only value I'd see in not sending the timestamp by default is to save on bytes over the wire
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload, DigitalTwinsSendTelemetryOptions options, Context context) { if (messageId == null || messageId.isEmpty()) { messageId = UUID.randomUUID().toString(); } com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions; if (options == null) { options = new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); } return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync( digitalTwinId, messageId, payload, options.getTimestamp().toString(), protocolLayerOptions, context); }
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
Sure, but for this case, I'd say that the timestamp information is valuable. My vote is to send the current timestamp, if user doesn't provide it.
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload, DigitalTwinsSendTelemetryOptions options, Context context) { if (messageId == null || messageId.isEmpty()) { messageId = UUID.randomUUID().toString(); } com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions; if (options == null) { options = new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); } return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync( digitalTwinId, messageId, payload, options.getTimestamp().toString(), protocolLayerOptions, context); }
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
Then I'll keep the implementation as is. If the user passes in a null options object, we will send the current timestamp for them
Mono<Response<Void>> publishTelemetryWithResponse(String digitalTwinId, String messageId, Object payload, DigitalTwinsSendTelemetryOptions options, Context context) { if (messageId == null || messageId.isEmpty()) { messageId = UUID.randomUUID().toString(); } com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions protocolLayerOptions; if (options == null) { options = new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); } return protocolLayer.getDigitalTwins().sendTelemetryWithResponseAsync( digitalTwinId, messageId, payload, options.getTimestamp().toString(), protocolLayerOptions, context); }
options.getTimestamp().toString(),
new DigitalTwinsSendTelemetryOptions(); protocolLayerOptions = null; } else { protocolLayerOptions = new com.azure.digitaltwins.core.implementation.models.DigitalTwinsSendTelemetryOptions() .setTraceparent(options.getTraceparent()) .setTracestate(options.getTracestate()); }
class such as {@link BasicDigitalTwin}
class such as {@link BasicDigitalTwin}
Do we also need to include lines 302-309 in the if playbackmode clause? I think the response was returning with 404 from the delay: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=565232&view=logs&j=351a4efb-edfe-5bb4-cda1-1ace5fc54b68&t=332ee861-0df2-58fa-cf63-28af96dd70e8
public void canSendThenListReadReceiptsWithResponse() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceiptWithResponse(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } }
public void canSendThenListReadReceiptsWithResponse() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceiptWithResponse(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } }
class ChatThreadAsyncClientTest extends ChatClientTestBase { private ClientLogger logger = new ClientLogger(ChatThreadAsyncClientTest.class); private CommunicationIdentityClient communicationClient; private ChatAsyncClient client; private ChatThreadAsyncClient chatThreadClient; private String threadId; private CommunicationUser firstThreadMember; private CommunicationUser secondThreadMember; private CommunicationUser firstAddedThreadMember; private CommunicationUser secondAddedThreadMember; @Override protected void beforeTest() { super.beforeTest(); communicationClient = getCommunicationIdentityClientBuilder().buildClient(); firstThreadMember = communicationClient.createUser(); secondThreadMember = communicationClient.createUser(); List<String> scopes = new ArrayList<String>(Arrays.asList("chat")); CommunicationUserToken response = communicationClient.issueToken(firstThreadMember, scopes); client = getChatClientBuilder(response.getToken()).buildAsyncClient(); CreateChatThreadOptions threadRequest = ChatOptionsProvider.createThreadOptions( firstThreadMember.getId(), secondThreadMember.getId()); chatThreadClient = client.createChatThread(threadRequest).block(); threadId = chatThreadClient.getChatThreadId(); } @Override protected void afterTest() { super.afterTest(); } @Test public void canUpdateThread() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThread(threadRequest).block(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canUpdateThreadWithResponse() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThreadWithResponse(threadRequest).block().getValue(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canAddListAndRemoveMembersAsync() throws 
InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembers(options).block(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { chatThreadClient.removeMember(member.getUser()).block(); } } @Test public void canAddListAndRemoveMembersWithResponseAsync() throws InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembersWithResponse(options).block().getValue(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { 
chatThreadClient.removeMemberWithResponse(member.getUser()).block().getValue(); } } @Test public void canSendThenGetMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canSendThenGetMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessageWithResponse(messageRequest).block().getValue(); ChatMessage message = chatThreadClient.getMessageWithResponse(response.getId()).block().getValue(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canDeleteExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessage(response.getId()); } @Test public void canDeleteExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessageWithResponse(response.getId()).block(); } @Test public void canUpdateExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = 
chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessage(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canUpdateExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessageWithResponse(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canListMessages() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); PagedIterable<ChatMessage> messagesResponse = new PagedIterable<ChatMessage>(chatThreadClient.listMessages()); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canListMessagesWithOptions() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); ListChatMessagesOptions options = new ListChatMessagesOptions(); options.setMaxPageSize(10); options.setStartTime(OffsetDateTime.parse("2020-09-08T01:02:14.387Z")); PagedIterable<ChatMessage> messagesResponse = new 
PagedIterable<ChatMessage>(chatThreadClient.listMessages(options)); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canSendTypingNotification() { chatThreadClient.sendTypingNotification().block(); } @Test public void canSendTypingNotificationWithResponse() { chatThreadClient.sendTypingNotificationWithResponse().block(); } @Test public void canSendThenListReadReceipts() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceipt(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } } @Test }
class ChatThreadAsyncClientTest extends ChatClientTestBase { private ClientLogger logger = new ClientLogger(ChatThreadAsyncClientTest.class); private CommunicationIdentityClient communicationClient; private ChatAsyncClient client; private ChatThreadAsyncClient chatThreadClient; private String threadId; private CommunicationUser firstThreadMember; private CommunicationUser secondThreadMember; private CommunicationUser firstAddedThreadMember; private CommunicationUser secondAddedThreadMember; @Override protected void beforeTest() { super.beforeTest(); communicationClient = getCommunicationIdentityClientBuilder().buildClient(); firstThreadMember = communicationClient.createUser(); secondThreadMember = communicationClient.createUser(); List<String> scopes = new ArrayList<String>(Arrays.asList("chat")); CommunicationUserToken response = communicationClient.issueToken(firstThreadMember, scopes); client = getChatClientBuilder(response.getToken()).buildAsyncClient(); CreateChatThreadOptions threadRequest = ChatOptionsProvider.createThreadOptions( firstThreadMember.getId(), secondThreadMember.getId()); chatThreadClient = client.createChatThread(threadRequest).block(); threadId = chatThreadClient.getChatThreadId(); } @Override protected void afterTest() { super.afterTest(); } @Test public void canUpdateThread() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThread(threadRequest).block(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canUpdateThreadWithResponse() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThreadWithResponse(threadRequest).block().getValue(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canAddListAndRemoveMembersAsync() throws 
InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembers(options).block(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { chatThreadClient.removeMember(member.getUser()).block(); } } @Test public void canAddListAndRemoveMembersWithResponseAsync() throws InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembersWithResponse(options).block().getValue(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { 
chatThreadClient.removeMemberWithResponse(member.getUser()).block().getValue(); } } @Test public void canSendThenGetMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canSendThenGetMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessageWithResponse(messageRequest).block().getValue(); ChatMessage message = chatThreadClient.getMessageWithResponse(response.getId()).block().getValue(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canDeleteExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessage(response.getId()); } @Test public void canDeleteExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessageWithResponse(response.getId()).block(); } @Test public void canUpdateExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = 
chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessage(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canUpdateExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessageWithResponse(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canListMessages() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); PagedIterable<ChatMessage> messagesResponse = new PagedIterable<ChatMessage>(chatThreadClient.listMessages()); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canListMessagesWithOptions() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); ListChatMessagesOptions options = new ListChatMessagesOptions(); options.setMaxPageSize(10); options.setStartTime(OffsetDateTime.parse("2020-09-08T01:02:14.387Z")); PagedIterable<ChatMessage> messagesResponse = new 
PagedIterable<ChatMessage>(chatThreadClient.listMessages(options)); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canSendTypingNotification() { chatThreadClient.sendTypingNotification().block(); } @Test public void canSendTypingNotificationWithResponse() { chatThreadClient.sendTypingNotificationWithResponse().block(); } @Test public void canSendThenListReadReceipts() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceipt(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } } @Test }
I believe the 404 was happening sporadically when the message was sent, and we then send a readreceipt - chatThreadClient.sendReadReceiptWithResponse(response.getId()) At least that was the line that failed 1 time in the .NET sdk. We could leave this one as is for now, but if we see that happening I agree with @minnieliu . In the .NET version, I took the entire sendmessage/sendreadreceipt, getreadreceipts out of the live workflow into the Playback mode to avoid flakiness
public void canSendThenListReadReceiptsWithResponse() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceiptWithResponse(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } }
public void canSendThenListReadReceiptsWithResponse() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceiptWithResponse(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } }
class ChatThreadAsyncClientTest extends ChatClientTestBase { private ClientLogger logger = new ClientLogger(ChatThreadAsyncClientTest.class); private CommunicationIdentityClient communicationClient; private ChatAsyncClient client; private ChatThreadAsyncClient chatThreadClient; private String threadId; private CommunicationUser firstThreadMember; private CommunicationUser secondThreadMember; private CommunicationUser firstAddedThreadMember; private CommunicationUser secondAddedThreadMember; @Override protected void beforeTest() { super.beforeTest(); communicationClient = getCommunicationIdentityClientBuilder().buildClient(); firstThreadMember = communicationClient.createUser(); secondThreadMember = communicationClient.createUser(); List<String> scopes = new ArrayList<String>(Arrays.asList("chat")); CommunicationUserToken response = communicationClient.issueToken(firstThreadMember, scopes); client = getChatClientBuilder(response.getToken()).buildAsyncClient(); CreateChatThreadOptions threadRequest = ChatOptionsProvider.createThreadOptions( firstThreadMember.getId(), secondThreadMember.getId()); chatThreadClient = client.createChatThread(threadRequest).block(); threadId = chatThreadClient.getChatThreadId(); } @Override protected void afterTest() { super.afterTest(); } @Test public void canUpdateThread() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThread(threadRequest).block(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canUpdateThreadWithResponse() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThreadWithResponse(threadRequest).block().getValue(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canAddListAndRemoveMembersAsync() throws 
InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembers(options).block(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { chatThreadClient.removeMember(member.getUser()).block(); } } @Test public void canAddListAndRemoveMembersWithResponseAsync() throws InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembersWithResponse(options).block().getValue(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { 
chatThreadClient.removeMemberWithResponse(member.getUser()).block().getValue(); } } @Test public void canSendThenGetMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canSendThenGetMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessageWithResponse(messageRequest).block().getValue(); ChatMessage message = chatThreadClient.getMessageWithResponse(response.getId()).block().getValue(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canDeleteExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessage(response.getId()); } @Test public void canDeleteExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessageWithResponse(response.getId()).block(); } @Test public void canUpdateExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = 
chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessage(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canUpdateExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessageWithResponse(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canListMessages() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); PagedIterable<ChatMessage> messagesResponse = new PagedIterable<ChatMessage>(chatThreadClient.listMessages()); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canListMessagesWithOptions() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); ListChatMessagesOptions options = new ListChatMessagesOptions(); options.setMaxPageSize(10); options.setStartTime(OffsetDateTime.parse("2020-09-08T01:02:14.387Z")); PagedIterable<ChatMessage> messagesResponse = new 
PagedIterable<ChatMessage>(chatThreadClient.listMessages(options)); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canSendTypingNotification() { chatThreadClient.sendTypingNotification().block(); } @Test public void canSendTypingNotificationWithResponse() { chatThreadClient.sendTypingNotificationWithResponse().block(); } @Test public void canSendThenListReadReceipts() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceipt(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } } @Test }
class ChatThreadAsyncClientTest extends ChatClientTestBase { private ClientLogger logger = new ClientLogger(ChatThreadAsyncClientTest.class); private CommunicationIdentityClient communicationClient; private ChatAsyncClient client; private ChatThreadAsyncClient chatThreadClient; private String threadId; private CommunicationUser firstThreadMember; private CommunicationUser secondThreadMember; private CommunicationUser firstAddedThreadMember; private CommunicationUser secondAddedThreadMember; @Override protected void beforeTest() { super.beforeTest(); communicationClient = getCommunicationIdentityClientBuilder().buildClient(); firstThreadMember = communicationClient.createUser(); secondThreadMember = communicationClient.createUser(); List<String> scopes = new ArrayList<String>(Arrays.asList("chat")); CommunicationUserToken response = communicationClient.issueToken(firstThreadMember, scopes); client = getChatClientBuilder(response.getToken()).buildAsyncClient(); CreateChatThreadOptions threadRequest = ChatOptionsProvider.createThreadOptions( firstThreadMember.getId(), secondThreadMember.getId()); chatThreadClient = client.createChatThread(threadRequest).block(); threadId = chatThreadClient.getChatThreadId(); } @Override protected void afterTest() { super.afterTest(); } @Test public void canUpdateThread() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThread(threadRequest).block(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canUpdateThreadWithResponse() { UpdateChatThreadOptions threadRequest = ChatOptionsProvider.updateThreadOptions(); chatThreadClient.updateChatThreadWithResponse(threadRequest).block().getValue(); ChatThread chatThread = client.getChatThread(threadId).block(); assertEquals(chatThread.getTopic(), threadRequest.getTopic()); } @Test public void canAddListAndRemoveMembersAsync() throws 
InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembers(options).block(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { chatThreadClient.removeMember(member.getUser()).block(); } } @Test public void canAddListAndRemoveMembersWithResponseAsync() throws InterruptedException { firstAddedThreadMember = communicationClient.createUser(); secondAddedThreadMember = communicationClient.createUser(); AddChatThreadMembersOptions options = ChatOptionsProvider.addThreadMembersOptions( firstAddedThreadMember.getId(), secondAddedThreadMember.getId()); chatThreadClient.addMembersWithResponse(options).block().getValue(); PagedIterable<ChatThreadMember> membersResponse = new PagedIterable<>(chatThreadClient.listMembers()); List<ChatThreadMember> returnedMembers = new ArrayList<ChatThreadMember>(); membersResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedMembers.add(item)); }); for (ChatThreadMember member: options.getMembers()) { assertTrue(checkMembersListContainsMemberId(returnedMembers, member.getUser().getId())); } assertTrue(returnedMembers.size() == 4); for (ChatThreadMember member: options.getMembers()) { 
chatThreadClient.removeMemberWithResponse(member.getUser()).block().getValue(); } } @Test public void canSendThenGetMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canSendThenGetMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessageWithResponse(messageRequest).block().getValue(); ChatMessage message = chatThreadClient.getMessageWithResponse(response.getId()).block().getValue(); assertEquals(message.getContent(), messageRequest.getContent()); assertEquals(message.getPriority(), messageRequest.getPriority()); assertEquals(message.getSenderDisplayName(), messageRequest.getSenderDisplayName()); } @Test public void canDeleteExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessage(response.getId()); } @Test public void canDeleteExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.deleteMessageWithResponse(response.getId()).block(); } @Test public void canUpdateExistingMessage() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = 
chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessage(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canUpdateExistingMessageWithResponse() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); UpdateChatMessageOptions updateMessageRequest = ChatOptionsProvider.updateMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.updateMessageWithResponse(response.getId(), updateMessageRequest).block(); ChatMessage message = chatThreadClient.getMessage(response.getId()).block(); assertEquals(message.getContent(), updateMessageRequest.getContent()); } @Test public void canListMessages() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); PagedIterable<ChatMessage> messagesResponse = new PagedIterable<ChatMessage>(chatThreadClient.listMessages()); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canListMessagesWithOptions() { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendMessage(messageRequest).block(); ListChatMessagesOptions options = new ListChatMessagesOptions(); options.setMaxPageSize(10); options.setStartTime(OffsetDateTime.parse("2020-09-08T01:02:14.387Z")); PagedIterable<ChatMessage> messagesResponse = new 
PagedIterable<ChatMessage>(chatThreadClient.listMessages(options)); List<ChatMessage> returnedMessages = new ArrayList<ChatMessage>(); messagesResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> { if (item.getType().equals("Text")) { returnedMessages.add(item); } }); }); assertTrue(returnedMessages.size() == 2); } @Test public void canSendTypingNotification() { chatThreadClient.sendTypingNotification().block(); } @Test public void canSendTypingNotificationWithResponse() { chatThreadClient.sendTypingNotificationWithResponse().block(); } @Test public void canSendThenListReadReceipts() throws InterruptedException { SendChatMessageOptions messageRequest = ChatOptionsProvider.sendMessageOptions(); SendChatMessageResult response = chatThreadClient.sendMessage(messageRequest).block(); chatThreadClient.sendReadReceipt(response.getId()).block(); PagedIterable<ReadReceipt> readReceiptsResponse = new PagedIterable<ReadReceipt>(chatThreadClient.listReadReceipts()); List<ReadReceipt> returnedReadReceipts = new ArrayList<ReadReceipt>(); readReceiptsResponse.iterableByPage().forEach(resp -> { assertEquals(resp.getStatusCode(), 200); resp.getItems().forEach(item -> returnedReadReceipts.add(item)); }); if (interceptorManager.isPlaybackMode()) { assertTrue(returnedReadReceipts.size() > 0); checkReadReceiptListContainsMessageId(returnedReadReceipts, response.getId()); } } @Test }
Should the consistency-level header comparison ignore case?
/**
 * Decides whether a session token belongs on this request and applies or strips it.
 *
 * A session token is applicable when the request explicitly asks for SESSION
 * consistency, or when the account default is SESSION and the request is not a
 * read-only Document read that explicitly downgraded itself to EVENTUAL.
 * Master resources never carry session tokens.
 *
 * @param request the outgoing request whose headers are mutated in place
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    // NOTE(review): Strings.areEqual here looks case-sensitive — confirm whether the
    // CONSISTENCY_LEVEL header value should be compared case-insensitively.
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                // Document reads that explicitly request EVENTUAL opt out of session consistency.
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));

    // Caller already supplied a session token: keep it only if it is applicable
    // and the target is not a master resource; otherwise strip it.
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    // No caller-supplied token and none is applicable: nothing to do.
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }

    // Resolve the token tracked for this session and attach it if one exists.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
/**
 * Decides whether a session token belongs on this request and applies or strips it.
 *
 * A session token is applicable when the request explicitly asks for SESSION
 * consistency, or when the account default is SESSION and the request is not a
 * read-only Document read that explicitly downgraded itself to EVENTUAL.
 * Master resources never carry session tokens.
 *
 * @param request the outgoing request whose headers are mutated in place
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    // NOTE(review): Strings.areEqual here looks case-sensitive — confirm whether the
    // CONSISTENCY_LEVEL header value should be compared case-insensitively.
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                // Document reads that explicitly request EVENTUAL opt out of session consistency.
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));

    // Caller already supplied a session token: keep it only if it is applicable
    // and the target is not a master resource; otherwise strip it.
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    // No caller-supplied token and none is applicable: nothing to do.
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }

    // Resolve the token tracked for this session and attach it if one exists.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
/**
 * Gateway-mode transport for the Cosmos client: translates each
 * {@link RxDocumentServiceRequest} into an HTTPS call against the Cosmos DB gateway
 * endpoint and converts the HTTP response back into an {@link RxDocumentServiceResponse}.
 * Also maintains session-token bookkeeping used for session-consistency reads.
 *
 * NOTE(review): processMessage() invokes this.applySessionToken(request), which is
 * defined elsewhere in this file — confirm it is present alongside this class body.
 */
class RxGatewayStoreModel implements RxStoreModel {

    // Sentinel body used when the HTTP response carries no content.
    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    // Account-level default consistency; drives session-token applicability decisions.
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    /**
     * Builds the store model and pre-populates the default header set
     * (cache-control, API version, user agent, and — when configured — the
     * default consistency level).
     */
    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
                "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
                HttpConstants.Versions.CURRENT_VERSION);
        // Fall back to a default user agent when the caller did not provide one.
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                    defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // --- One thin HTTP-verb wrapper per Cosmos operation type. ---

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query as an HTTP POST, marking it with the IS_QUERY header (except for
     * query-plan-only requests) and choosing the content type dictated by the configured
     * {@link QueryCompatibilityMode}.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if(request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }

        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                        RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                        RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which upon subscription issues the HTTP call and
     * emits one RxDocumentServiceResponse. The response timeout is widened/narrowed for
     * query-plan and address-refresh requests respectively.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a {@code Mono<RxDocumentServiceResponse>} (cold — nothing is sent until subscribed)
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            // Lazily attach a diagnostics context so timings can be recorded downstream.
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                    uri,
                    uri.getPort(),
                    httpHeaders,
                    contentAsByteArray);
            // Operation-specific response timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous setup failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the default headers. Request headers win;
     * a null header value is normalized to the empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Defaults first, but only where the request did not override them.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }

        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the absolute HTTPS URI for the request: an explicit endpoint override wins,
     * media requests go to the first write endpoint, everything else is resolved by the
     * global endpoint manager. DatabaseAccount requests target the service root.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // NOTE(review): media always uses the first write endpoint — presumably
                // intentional (media is not geo-replicated); confirm.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }

        return new URI("https",
                null,
                rootUri.getHost(),
                rootUri.getPort(),
                ensureSlashPrefixed(path),
                null,
                null);
    }

    // Guarantees the URI path starts with '/' (null passes through unchanged).
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }

        if (path.startsWith("/")) {
            return path;
        }

        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an RxDocumentServiceResponse
     * Mono. The HTTP invocation only happens once a downstream subscriber subscribes.
     * Non-2xx statuses are converted to CosmosException via validateOrThrow; network
     * failures get a gateway sub-status code; diagnostics are recorded either way.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request the originating request (for headers/diagnostics)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {

        return httpResponseMono.flatMap(httpResponse -> {

            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            // An empty body is represented by the shared empty byte array.
            Mono<byte[]> contentObservable = httpResponse
                    .bodyAsByteArray()
                    .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                    .map(content -> {
                        ReactorNettyRequestRecord reactorNettyRequestRecord =
                                httpResponse.request().reactorNettyRequestRecord();
                        if (reactorNettyRequestRecord != null) {
                            reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                            BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                                    request.requestContext.cosmosDiagnostics,
                                    reactorNettyRequestRecord.takeTimelineSnapshot());
                        }
                        // Throws CosmosException for error statuses before building the store response.
                        validateOrThrow(request,
                                HttpResponseStatus.valueOf(httpResponseStatus),
                                httpResponseHeaders,
                                content);
                        StoreResponse rsp = new StoreResponse(httpResponseStatus,
                                HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                                content);
                        // NOTE(review): reactorNettyRequestRecord is null-checked above but
                        // dereferenced unconditionally here — NPE risk if the record can
                        // actually be null. Confirm and guard if so.
                        DirectBridgeInternal.setRequestTimeline(rsp,
                                reactorNettyRequestRecord.takeTimelineSnapshot());
                        if (request.requestContext.cosmosDiagnostics != null) {
                            BridgeInternal.recordGatewayResponse(
                                    request.requestContext.cosmosDiagnostics, request, rsp, null);
                            DirectBridgeInternal.setCosmosDiagnostics(
                                    rsp, request.requestContext.cosmosDiagnostics);
                        }
                        return rsp;
                    })
                    .single();

        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {

              // Reactor may wrap the real failure; unwrap before classifying it.
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are rethrown as-is.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Non-service failure (e.g. transport); wrap with status code 0.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              // Tag network failures with a gateway-specific sub-status code.
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                              HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                              HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(
                          request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts gateway error responses (status >= the gateway error threshold) into a
     * CosmosException carrying the parsed CosmosError body, the condensed reason phrase,
     * the response headers, and the original request headers.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {

        int statusCode = status.code();

        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            // Reason phrase with spaces removed, e.g. "Not Found" -> "NotFound".
            String statusCodeString = status.reasonPhrase() != null
                    ? status.reasonPhrase().replace(" ", "")
                    : "";
            // NOTE(review): new String(bodyAsBytes) uses the platform default charset —
            // presumably the body is UTF-8 JSON; confirm an explicit charset isn't needed.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            // Re-wrap to append the status code to the message while keeping the query plan info.
            cosmosError = new CosmosError(statusCodeString,
                    String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                    cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Routes the request to the verb-specific helper based on its operation type.
     * Batch shares the POST path with Create in this revision.
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                // NOTE(review): "setType" in this message looks like a typo for "type".
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point from the store-model interface: applies the session token, invokes the
     * request (with retries), and captures the session token from the response — or, for
     * non-master resources, from selected error responses (412, 409, and 404s that are not
     * read-session-not-available) so the session container stays current.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
                e -> {
                    CosmosException dce = Utils.as(e, CosmosException.class);

                    if (dce == null) {
                        logger.error("unexpected failure {}", e.getMessage(), e);
                        return Mono.error(e);
                    }

                    if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                            (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                                    (
                                            dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                                                    !Exceptions.isSubStatusCode(dce,
                                                            HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                        this.captureSessionToken(request, dce.getResponseHeaders());
                    }

                    return Mono.error(dce);
                }
        ).map(response ->
                {
                    this.captureSessionToken(request, response.getResponseHeaders());
                    return response;
                }
        );
    }

    /**
     * Updates the session container from response headers. Deleting a collection clears all
     * tokens for that collection (looked up by OWNER_ID header for name-based requests);
     * everything else records the token from the response.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
                request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
Open questions for review: 1. Should the ExecuteJavaScript operation (stored-procedure execution) also be covered here? 2. What about the QueryPlan operation? A query-plan request never needs the session token.
/**
 * Attaches, keeps, or strips the x-ms-session-token header on the outgoing request.
 *
 * A session token applies when the request explicitly asks for SESSION consistency, or
 * when the client default is SESSION and the request is not a read-only Document
 * operation explicitly downgraded to EVENTUAL. Master resources never carry a session
 * token. A caller-supplied token is preserved when applicable and removed otherwise;
 * when no token was supplied and one is applicable, it is resolved from the session
 * container.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    // Explicit per-request SESSION consistency always makes the token applicable.
    boolean explicitSessionConsistency =
            Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString());

    // A read-only Document request explicitly downgraded to EVENTUAL opts out of the
    // default SESSION behavior.
    boolean eventualReadOverride =
            request.isReadOnlyRequest()
                    && request.getResourceType() == ResourceType.Document
                    && Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString());

    boolean sessionTokenApplicable =
            explicitSessionConsistency
                    || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && !eventualReadOverride);

    boolean tokenNotUsable =
            !sessionTokenApplicable
                    || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    String suppliedToken = headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(suppliedToken)) {
        // Caller provided a token: strip it when it cannot apply, otherwise leave as-is.
        if (tokenNotUsable) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (tokenNotUsable) {
        return;
    }

    // No caller token and a token is applicable: resolve one from the session container.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
request.getResourceType() != ResourceType.Document ||
/**
 * Attaches, keeps, or strips the x-ms-session-token header on the outgoing request.
 *
 * Applicability: the token applies when the request explicitly asks for SESSION
 * consistency, or when the client default is SESSION and the request is NOT
 * (read-only AND a Document operation AND explicitly downgraded to EVENTUAL).
 * Master resources never carry a session token.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // The inner disjunction is the negation of "read-only Document request explicitly
    // set to EVENTUAL", i.e. the one case that opts out of the SESSION default.
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
        (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
            (!request.isReadOnlyRequest() ||
                request.getResourceType() != ResourceType.Document ||
                !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));

    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token: strip it when it cannot apply, otherwise keep it as-is.
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }

    // No caller token and a token is applicable: resolve one from the session container.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway-mode transport for the Cosmos client: translates each
 * {@link RxDocumentServiceRequest} into an HTTPS call against the Cosmos DB gateway
 * endpoint and converts the HTTP response back into an {@link RxDocumentServiceResponse}.
 * Also maintains session-token bookkeeping used for session-consistency reads.
 *
 * NOTE(review): processMessage() invokes this.applySessionToken(request), which is
 * defined elsewhere in this file — confirm it is present alongside this class body.
 */
class RxGatewayStoreModel implements RxStoreModel {

    // Sentinel body used when the HTTP response carries no content.
    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    // Account-level default consistency; drives session-token applicability decisions.
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    /**
     * Builds the store model and pre-populates the default header set
     * (cache-control, API version, user agent, and — when configured — the
     * default consistency level).
     */
    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
                "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
                HttpConstants.Versions.CURRENT_VERSION);
        // Fall back to a default user agent when the caller did not provide one.
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                    defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // --- One thin HTTP-verb wrapper per Cosmos operation type. ---

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query as an HTTP POST, marking it with the IS_QUERY header (except for
     * query-plan-only requests) and choosing the content type dictated by the configured
     * {@link QueryCompatibilityMode}.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if(request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }

        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                        RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                        RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which upon subscription issues the HTTP call and
     * emits one RxDocumentServiceResponse. The response timeout is adjusted for
     * query-plan and address-refresh requests.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a {@code Mono<RxDocumentServiceResponse>} (cold — nothing is sent until subscribed)
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            // Lazily attach a diagnostics context so timings can be recorded downstream.
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                    uri,
                    uri.getPort(),
                    httpHeaders,
                    contentAsByteArray);
            // Operation-specific response timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous setup failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the default headers. Request headers win;
     * a null header value is normalized to the empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Defaults first, but only where the request did not override them.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }

        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the absolute HTTPS URI for the request: an explicit endpoint override wins,
     * media requests go to the first write endpoint, everything else is resolved by the
     * global endpoint manager. DatabaseAccount requests target the service root.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // NOTE(review): media always uses the first write endpoint — presumably
                // intentional (media is not geo-replicated); confirm.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }

        return new URI("https",
                null,
                rootUri.getHost(),
                rootUri.getPort(),
                ensureSlashPrefixed(path),
                null,
                null);
    }

    // Guarantees the URI path starts with '/' (null passes through unchanged).
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }

        if (path.startsWith("/")) {
            return path;
        }

        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an RxDocumentServiceResponse
     * Mono. The HTTP invocation only happens once a downstream subscriber subscribes.
     * Non-2xx statuses are converted to CosmosException via validateOrThrow; network
     * failures get a gateway sub-status code; diagnostics are recorded either way.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request the originating request (for headers/diagnostics)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {

        return httpResponseMono.flatMap(httpResponse -> {

            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            // An empty body is represented by the shared empty byte array.
            Mono<byte[]> contentObservable = httpResponse
                    .bodyAsByteArray()
                    .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                    .map(content -> {
                        ReactorNettyRequestRecord reactorNettyRequestRecord =
                                httpResponse.request().reactorNettyRequestRecord();
                        if (reactorNettyRequestRecord != null) {
                            reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                            BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                                    request.requestContext.cosmosDiagnostics,
                                    reactorNettyRequestRecord.takeTimelineSnapshot());
                        }
                        // Throws CosmosException for error statuses before building the store response.
                        validateOrThrow(request,
                                HttpResponseStatus.valueOf(httpResponseStatus),
                                httpResponseHeaders,
                                content);
                        StoreResponse rsp = new StoreResponse(httpResponseStatus,
                                HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                                content);
                        // NOTE(review): reactorNettyRequestRecord is null-checked above but
                        // dereferenced unconditionally here — NPE risk if the record can
                        // actually be null. Confirm and guard if so.
                        DirectBridgeInternal.setRequestTimeline(rsp,
                                reactorNettyRequestRecord.takeTimelineSnapshot());
                        if (request.requestContext.cosmosDiagnostics != null) {
                            BridgeInternal.recordGatewayResponse(
                                    request.requestContext.cosmosDiagnostics, request, rsp, null);
                            DirectBridgeInternal.setCosmosDiagnostics(
                                    rsp, request.requestContext.cosmosDiagnostics);
                        }
                        return rsp;
                    })
                    .single();

        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {

              // Reactor may wrap the real failure; unwrap before classifying it.
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are rethrown as-is.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Non-service failure (e.g. transport); wrap with status code 0.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              // Tag network failures with a gateway-specific sub-status code.
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                              HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                              HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(
                          request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts gateway error responses (status >= the gateway error threshold) into a
     * CosmosException carrying the parsed CosmosError body, the condensed reason phrase,
     * the response headers, and the original request headers.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {

        int statusCode = status.code();

        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            // Reason phrase with spaces removed, e.g. "Not Found" -> "NotFound".
            String statusCodeString = status.reasonPhrase() != null
                    ? status.reasonPhrase().replace(" ", "")
                    : "";
            // NOTE(review): new String(bodyAsBytes) uses the platform default charset —
            // presumably the body is UTF-8 JSON; confirm an explicit charset isn't needed.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            // Re-wrap to append the status code to the message while keeping the query plan info.
            cosmosError = new CosmosError(statusCodeString,
                    String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                    cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Routes the request to the verb-specific helper based on its operation type.
     * NOTE(review): Batch is not handled here and would fall through to the default
     * IllegalStateException — confirm whether Batch should route to create() in this
     * revision.
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                // NOTE(review): "setType" in this message looks like a typo for "type".
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point from the store-model interface: applies the session token, invokes the
     * request (with retries), and captures the session token from the response — or, for
     * non-master resources, from selected error responses (412, 409, and 404s that are not
     * read-session-not-available) so the session container stays current.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
                e -> {
                    CosmosException dce = Utils.as(e, CosmosException.class);

                    if (dce == null) {
                        logger.error("unexpected failure {}", e.getMessage(), e);
                        return Mono.error(e);
                    }

                    if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                            (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                                    (
                                            dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                                                    !Exceptions.isSubStatusCode(dce,
                                                            HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                        this.captureSessionToken(request, dce.getResponseHeaders());
                    }

                    return Mono.error(dce);
                }
        ).map(response ->
                {
                    this.captureSessionToken(request, response.getResponseHeaders());
                    return response;
                }
        );
    }

    /**
     * Updates the session container from response headers. Deleting a collection clears all
     * tokens for that collection (looked up by OWNER_ID header for name-based requests);
     * everything else records the token from the response.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
                request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
/**
 * Gateway-mode store model: translates each {@link RxDocumentServiceRequest} into an HTTP
 * call against the Cosmos DB gateway endpoint and converts the HTTP response back into an
 * {@link RxDocumentServiceResponse}, maintaining session tokens and request diagnostics
 * along the way.
 */
class RxGatewayStoreModel implements RxStoreModel {

    // Stand-in body for responses with no content, so downstream code never sees null.
    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Query-plan requests are not marked as queries; everything else going through
        // this path is.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        // Content type depends on the configured query compatibility mode.
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which upon subscription issues the HTTP call and
     * emits one {@link RxDocumentServiceResponse}.
     *
     * @param request the document service request to send.
     * @param method the HTTP method to use.
     * @return {@code Mono<RxDocumentServiceResponse>} emitting the converted response.
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Query-plan and address-refresh requests get their own (shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures (e.g. URI construction) through the reactive chain.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the client defaults. Request headers win;
     * a null header value is sent as an empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUG FIX: the original called headers.containsKey(...) before its own
        // "headers != null" check, so a null map would NPE here instead of being
        // treated as "no overrides".
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the target URI for the request: an explicit endpoint override if present,
     * otherwise the endpoint chosen by the global endpoint manager.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media operations always go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            // Database-account reads target the service root.
            path = StringUtils.EMPTY;
        }

        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an
     * {@link RxDocumentServiceResponse} Mono.
     *
     * Once the customer code subscribes to the Mono returned by the CRUD APIs, the
     * subscription propagates up to the source reactor-netty publisher, and only at that
     * point is the HTTP invocation made.
     *
     * @param httpResponseMono the raw HTTP response publisher.
     * @param request the originating document service request.
     * @return {@link Mono} emitting the converted response or a {@link CosmosException}.
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws a CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUG FIX: the original dereferenced reactorNettyRequestRecord here
                    // unconditionally, even though it null-checked the very same reference
                    // above — an NPE waiting to happen when the record is absent.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp,
                            request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are rethrown untouched.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures so callers always see a CosmosException.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                      request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts gateway error status codes into a thrown {@link CosmosException}
     * carrying the parsed {@link CosmosError} body and response headers.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();

        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): new String(byte[]) uses the platform default charset; the
            // gateway body is presumably UTF-8 JSON — consider StandardCharsets.UTF_8.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;

            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Dispatches the request to the HTTP verb that matches its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point of the store model: applies the session token, invokes the request with
     * retries, and captures the session token from both success and (selected) failure
     * responses.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);

                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // For non-master resources, certain failures (412 / 409 / 404 that is not
                // "read session not available") still carry a usable session token.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                        (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                         dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                         (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                          !Exceptions.isSubStatusCode(dce,
                              HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }

                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    // Deleting a collection clears its session tokens; everything else records them.
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
                request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
This should likely either change the return type to the TaskCountsResult model, or add another method that also exposes taskSlotCounts.
/**
 * Gets the task counts for the specified job.
 *
 * @param jobId The ID of the job.
 * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
 * @return A {@link TaskCounts} instance containing the task counts for the job.
 * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
 * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
 */
public TaskCounts getTaskCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
    JobGetTaskCountsOptions taskCountsOptions = new JobGetTaskCountsOptions();
    BehaviorManager behaviorManager = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
    behaviorManager.applyRequestBehaviors(taskCountsOptions);
    return this.parentBatchClient
        .protocolLayer()
        .jobs()
        .getTaskCounts(jobId, taskCountsOptions)
        .taskCounts();
}
return this.parentBatchClient.protocolLayer().jobs().getTaskCounts(jobId, options).taskCounts();
/**
 * Gets the task counts for the specified job.
 *
 * Delegates to getTaskCountsResult and returns only its taskCounts component;
 * callers needing the full result (e.g. task slot counts) should use
 * getTaskCountsResult directly.
 *
 * @param jobId The ID of the job.
 * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
 * @return A {@link TaskCounts} instance containing the task counts for the job.
 * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
 * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
 */
public TaskCounts getTaskCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
    return getTaskCountsResult(jobId, additionalBehaviors).taskCounts();
}
class JobOperations implements IInheritedBehaviors { private Collection<BatchClientBehavior> customBehaviors; private final BatchClient parentBatchClient; JobOperations(BatchClient batchClient, Collection<BatchClientBehavior> inheritedBehaviors) { parentBatchClient = batchClient; InternalHelper.inheritClientBehaviorsAndSetPublicProperty(this, inheritedBehaviors); } /** * Gets a collection of behaviors that modify or customize requests to the Batch service. * * @return A collection of {@link BatchClientBehavior} instances. */ @Override public Collection<BatchClientBehavior> customBehaviors() { return customBehaviors; } /** * Sets a collection of behaviors that modify or customize requests to the Batch service. * * @param behaviors The collection of {@link BatchClientBehavior} instances. * @return The current instance. */ @Override public IInheritedBehaviors withCustomBehaviors(Collection<BatchClientBehavior> behaviors) { customBehaviors = behaviors; return this; } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics() throws BatchErrorException, IOException { return getAllJobsLifetimeStatistics(null); } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics(Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetAllLifetimeStatisticsOptions options = new JobGetAllLifetimeStatisticsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getAllLifetimeStatistics(options); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId) throws BatchErrorException, IOException { return getJob(jobId, null, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel) throws BatchErrorException, IOException { return getJob(jobId, detailLevel, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. 
* @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetOptions getJobOptions = new JobGetOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(getJobOptions); return this.parentBatchClient.protocolLayer().jobs().get(jobId, getJobOptions); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs() throws BatchErrorException, IOException { return listJobs(null, (Iterable<BatchClientBehavior>) null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(detailLevel, null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListOptions jobListOptions = new JobListOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().list(jobListOptions); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId) throws BatchErrorException, IOException { return listJobs(jobScheduleId, null, null); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(jobScheduleId, detailLevel, null); } /** * Lists the {@link CloudJob jobs} created under the specified jobSchedule. * * @param jobScheduleId The ID of jobSchedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListFromJobScheduleOptions jobListOptions = new JobListFromJobScheduleOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listFromJobSchedule(jobScheduleId, jobListOptions); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId) throws BatchErrorException, IOException { return listPreparationAndReleaseTaskStatus(jobId, null); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListPreparationAndReleaseTaskStatusOptions jobListOptions = new JobListPreparationAndReleaseTaskStatusOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listPreparationAndReleaseTaskStatus(jobId, jobListOptions); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { createJob(jobId, poolInfo, null); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddParameter param = new JobAddParameter() .withId(jobId) .withPoolInfo(poolInfo); createJob(param, additionalBehaviors); } /** * Adds a job to the Batch account. 
* * @param job The job to be added. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job) throws BatchErrorException, IOException { createJob(job, null); } /** * Adds a job to the Batch account. * * @param job The job to be added. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddOptions options = new JobAddOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().add(job, options); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId) throws BatchErrorException, IOException { deleteJob(jobId, null); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDeleteOptions options = new JobDeleteOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().delete(jobId, options); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId) throws BatchErrorException, IOException { terminateJob(jobId, null, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. This text will appear when you call {@link JobExecutionInformation * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason) throws BatchErrorException, IOException { terminateJob(jobId, terminateReason, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. 
This text will appear when you call {@link JobExecutionInformation * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobTerminateOptions options = new JobTerminateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().terminate(jobId, terminateReason, options); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void enableJob(String jobId) throws BatchErrorException, IOException { enableJob(jobId, null); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void enableJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobEnableOptions options = new JobEnableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().enable(jobId, options); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void disableJob(String jobId, DisableJobOption disableJobOption) throws BatchErrorException, IOException { disableJob(jobId, disableJobOption, null); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void disableJob(String jobId, DisableJobOption disableJobOption, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDisableOptions options = new JobDisableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().disable(jobId, disableJobOption, options); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { updateJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobUpdateOptions options = new JobUpdateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); JobUpdateParameter param = new JobUpdateParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); this.parentBatchClient.protocolLayer().jobs().update(jobId, param, options); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, null, null, null, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. 
* @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, OnAllTasksComplete onAllTasksComplete) throws BatchErrorException, IOException { patchJob(jobId, null, null, null, onAllTasksComplete, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchParameter param = new JobPatchParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); patchJob(jobId, param, additionalBehaviors); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The set of changes to be made to a job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, JobPatchParameter jobPatchParameter) throws BatchErrorException, IOException { patchJob(jobId, jobPatchParameter, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The parameter to update the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, JobPatchParameter jobPatchParameter, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchOptions options = new JobPatchOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().patch(jobId, jobPatchParameter, options); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ public TaskCounts getTaskCounts(String jobId) throws BatchErrorException, IOException { return getTaskCounts(jobId, null); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ }
class JobOperations implements IInheritedBehaviors { private Collection<BatchClientBehavior> customBehaviors; private final BatchClient parentBatchClient; JobOperations(BatchClient batchClient, Collection<BatchClientBehavior> inheritedBehaviors) { parentBatchClient = batchClient; InternalHelper.inheritClientBehaviorsAndSetPublicProperty(this, inheritedBehaviors); } /** * Gets a collection of behaviors that modify or customize requests to the Batch service. * * @return A collection of {@link BatchClientBehavior} instances. */ @Override public Collection<BatchClientBehavior> customBehaviors() { return customBehaviors; } /** * Sets a collection of behaviors that modify or customize requests to the Batch service. * * @param behaviors The collection of {@link BatchClientBehavior} instances. * @return The current instance. */ @Override public IInheritedBehaviors withCustomBehaviors(Collection<BatchClientBehavior> behaviors) { customBehaviors = behaviors; return this; } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics() throws BatchErrorException, IOException { return getAllJobsLifetimeStatistics(null); } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics(Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetAllLifetimeStatisticsOptions options = new JobGetAllLifetimeStatisticsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getAllLifetimeStatistics(options); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId) throws BatchErrorException, IOException { return getJob(jobId, null, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel) throws BatchErrorException, IOException { return getJob(jobId, detailLevel, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. 
* @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetOptions getJobOptions = new JobGetOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(getJobOptions); return this.parentBatchClient.protocolLayer().jobs().get(jobId, getJobOptions); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs() throws BatchErrorException, IOException { return listJobs(null, (Iterable<BatchClientBehavior>) null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(detailLevel, null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListOptions jobListOptions = new JobListOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().list(jobListOptions); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId) throws BatchErrorException, IOException { return listJobs(jobScheduleId, null, null); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(jobScheduleId, detailLevel, null); } /** * Lists the {@link CloudJob jobs} created under the specified jobSchedule. * * @param jobScheduleId The ID of jobSchedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListFromJobScheduleOptions jobListOptions = new JobListFromJobScheduleOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listFromJobSchedule(jobScheduleId, jobListOptions); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId) throws BatchErrorException, IOException { return listPreparationAndReleaseTaskStatus(jobId, null); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/
    public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobListPreparationAndReleaseTaskStatusOptions jobListOptions = new JobListPreparationAndReleaseTaskStatusOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(jobListOptions);
        return this.parentBatchClient.protocolLayer().jobs().listPreparationAndReleaseTaskStatus(jobId, jobListOptions);
    }

    /**
     * Adds a job to the Batch account.
     *
     * @param jobId The ID of the job to be added.
     * @param poolInfo Specifies how a job should be assigned to a pool.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void createJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException {
        createJob(jobId, poolInfo, null);
    }

    /**
     * Adds a job to the Batch account.
     *
     * @param jobId The ID of the job to be added.
     * @param poolInfo Specifies how a job should be assigned to a pool.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void createJob(String jobId, PoolInformation poolInfo, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        // Builds the add parameter from the two required pieces and defers to the full overload.
        JobAddParameter param = new JobAddParameter()
                .withId(jobId)
                .withPoolInfo(poolInfo);
        createJob(param, additionalBehaviors);
    }

    /**
     * Adds a job to the Batch account.
     *
     * @param job The job to be added.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void createJob(JobAddParameter job) throws BatchErrorException, IOException {
        createJob(job, null);
    }

    /**
     * Adds a job to the Batch account.
     *
     * @param job The job to be added.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void createJob(JobAddParameter job, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobAddOptions options = new JobAddOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(options);
        this.parentBatchClient.protocolLayer().jobs().add(job, options);
    }

    /**
     * Deletes the specified job.
     *
     * @param jobId The ID of the job.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void deleteJob(String jobId) throws BatchErrorException, IOException {
        deleteJob(jobId, null);
    }

    /**
     * Deletes the specified job.
     *
     * @param jobId The ID of the job.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void deleteJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobDeleteOptions options = new JobDeleteOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(options);
        this.parentBatchClient.protocolLayer().jobs().delete(jobId, options);
    }

    /**
     * Terminates the specified job, marking it as completed.
     *
     * @param jobId The ID of the job.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void terminateJob(String jobId) throws BatchErrorException, IOException {
        terminateJob(jobId, null, null);
    }

    /**
     * Terminates the specified job, marking it as completed.
     *
     * @param jobId The ID of the job.
     * @param terminateReason The message to describe the reason the job has terminated. This text will appear when you call {@link JobExecutionInformation#terminateReason()}.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void terminateJob(String jobId, String terminateReason) throws BatchErrorException, IOException {
        terminateJob(jobId, terminateReason, null);
    }

    /**
     * Terminates the specified job, marking it as completed.
     *
     * @param jobId The ID of the job.
     * @param terminateReason The message to describe the reason the job has terminated.
This text will appear when you call {@link JobExecutionInformation#terminateReason()}.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void terminateJob(String jobId, String terminateReason, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobTerminateOptions options = new JobTerminateOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(options);
        this.parentBatchClient.protocolLayer().jobs().terminate(jobId, terminateReason, options);
    }

    /**
     * Enables the specified job, allowing new tasks to run.
     *
     * @param jobId The ID of the job.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void enableJob(String jobId) throws BatchErrorException, IOException {
        enableJob(jobId, null);
    }

    /**
     * Enables the specified job, allowing new tasks to run.
     *
     * @param jobId The ID of the job.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void enableJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobEnableOptions options = new JobEnableOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(options);
        this.parentBatchClient.protocolLayer().jobs().enable(jobId, options);
    }

    /**
     * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later.
     *
     * @param jobId The ID of the job.
     * @param disableJobOption Specifies what to do with running tasks associated with the job.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void disableJob(String jobId, DisableJobOption disableJobOption) throws BatchErrorException, IOException {
        disableJob(jobId, disableJobOption, null);
    }

    /**
     * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later.
     *
     * @param jobId The ID of the job.
     * @param disableJobOption Specifies what to do with running tasks associated with the job.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void disableJob(String jobId, DisableJobOption disableJobOption, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
        JobDisableOptions options = new JobDisableOptions();
        BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
        bhMgr.applyRequestBehaviors(options);
        this.parentBatchClient.protocolLayer().jobs().disable(jobId, disableJobOption, options);
    }

    /**
     * Updates the specified job.
     * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints.
     *
     * @param jobId The ID of the job.
     * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job.
     * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0.
     * @param constraints The execution constraints for the job. If null, the constraints are cleared.
     * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state.
     * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
     */
    public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException {
        updateJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null);
    }

    /**
     * Updates the specified job.
     * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints.
     *
     * @param jobId The ID of the job.
     * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job.
     * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0.
     * @param constraints The execution constraints for the job. If null, the constraints are cleared.
     * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state.
     * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted.
     * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
     * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
     * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobUpdateOptions options = new JobUpdateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); JobUpdateParameter param = new JobUpdateParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); this.parentBatchClient.protocolLayer().jobs().update(jobId, param, options); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, null, null, null, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. 
* @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, OnAllTasksComplete onAllTasksComplete) throws BatchErrorException, IOException { patchJob(jobId, null, null, null, onAllTasksComplete, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchParameter param = new JobPatchParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); patchJob(jobId, param, additionalBehaviors); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The set of changes to be made to a job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, JobPatchParameter jobPatchParameter) throws BatchErrorException, IOException { patchJob(jobId, jobPatchParameter, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The parameter to update the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, JobPatchParameter jobPatchParameter, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchOptions options = new JobPatchOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().patch(jobId, jobPatchParameter, options); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ public TaskCounts getTaskCounts(String jobId) throws BatchErrorException, IOException { return getTaskCounts(jobId, null); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ /** * Gets the task slot counts for the specified job. * Task slot counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. 
Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskSlotCounts object if successful. */ public TaskSlotCounts getTaskSlotCounts(String jobId) throws BatchErrorException, IOException { return getTaskSlotCounts(jobId, null); } /** * Gets the task slot counts for the specified job. * Task slot counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskSlotCounts object if successful. */ public TaskSlotCounts getTaskSlotCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { return getTaskCountsResult(jobId, additionalBehaviors).taskSlotCounts(); } /** * Gets the task counts result for the specified job. * The result includes both task counts and task slot counts. Each counts object provides a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
* @return the TaskCountsResult object if successful. */ public TaskCountsResult getTaskCountsResult(String jobId) throws BatchErrorException, IOException { return getTaskCountsResult(jobId, null); } /** * Gets the task counts result for the specified job. * The result includes both task counts and task slot counts. Each counts object provides a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCountsResult object if successful. */ public TaskCountsResult getTaskCountsResult( String jobId, Iterable<BatchClientBehavior> additionalBehaviors ) throws BatchErrorException, IOException { JobGetTaskCountsOptions options = new JobGetTaskCountsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getTaskCounts(jobId, options); } }
// Reviewer note: yes — thanks, good catch.
// Decides whether to attach, keep, or strip the x-ms-session-token header on an
// outgoing gateway request, based on the effective consistency level.
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    // Consistency level explicitly requested on this request (may be null if not overridden).
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // A session token applies when the request explicitly asks for SESSION consistency, or
    // when the account default is SESSION and the request has not effectively opted out
    // (a read-only Document read explicitly downgraded to EVENTUAL does not need one).
    boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
        (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
            (!request.isReadOnlyRequest() ||
                request.getResourceType() != ResourceType.Document ||
                !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    // Caller already supplied a session token: strip it if it does not apply (or the target
    // is a master resource, which never uses session tokens), otherwise leave it untouched.
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    // No caller-supplied token: nothing to do when session consistency does not apply.
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // Resolve the tracked session token for this request's partition/collection and attach it.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
// Attaches, keeps, or removes the session-token header depending on the effective
// consistency level of the outgoing request.
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    boolean explicitlySession = Strings.areEqual(requestLevel, ConsistencyLevel.SESSION.toString());
    boolean sessionByDefault =
        this.defaultConsistencyLevel == ConsistencyLevel.SESSION
            && (!request.isReadOnlyRequest()
                || request.getResourceType() != ResourceType.Document
                || !Strings.areEqual(requestLevel, ConsistencyLevel.EVENTUAL.toString()));
    boolean applicable = explicitlySession || sessionByDefault;

    String existingToken = request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingToken)) {
        // A token was supplied by the caller; drop it when it is not applicable here.
        if (!applicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (!applicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }

    String resolvedToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(resolvedToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, resolvedToken);
    }
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos gateway endpoint and maps responses/failures back to
 * {@link RxDocumentServiceResponse} / {@link CosmosException}.
 */
class RxGatewayStoreModel implements RxStoreModel {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
        DiagnosticsClientContext clientContext,
        ISessionContainer sessionContainer,
        ConsistencyLevel defaultConsistencyLevel,
        QueryCompatibilityMode queryCompatibilityMode,
        UserAgentContainer userAgentContainer,
        GlobalEndpointManager globalEndpointManager,
        HttpClient httpClient) {
        this.clientContext = clientContext;
        // Headers sent on every request unless the request itself overrides them.
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Query-plan requests are not marked as queries; everything else is.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a flux which upon subscription issues the HTTP call and emits
     * one RxDocumentServiceResponse.
     *
     * @param request the service request to send.
     * @param method the HTTP method to use.
     * @return a Mono emitting the service response or an error.
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Operation-specific response timeouts: query plans and address refreshes have
            // their own (shorter) configured limits.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            return Mono.error(e);
        }
    }

    // Merges the per-request headers over the client-wide defaults; request values win.
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // FIX(review): the original dereferenced 'headers' here before its null check below;
        // the null handling is now consistent throughout.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    // Builds the full HTTPS URI for the request, resolving the regional endpoint
    // unless the request carries an explicit endpoint override.
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media is always written to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
     *
     * Once the customer code subscribes to the observable returned by the CRUD APIs,
     * the subscription goes up till it reaches the source reactor netty's observable, and at that
     * point the HTTP invocation will be made.
     *
     * @param httpResponseMono the raw HTTP response.
     * @param request the originating service request (used for diagnostics and error context).
     * @return {@link Mono} emitting the translated service response.
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws CosmosException for error status codes before building the response.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // FIX(review): the original called takeTimelineSnapshot() here unconditionally,
                    // which NPEs when the request record is absent (it is null-checked above).
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Fatal JVM-level throwables are propagated untouched.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures into a CosmosException with status 0.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    // Converts gateway error responses (status >= the configured error threshold) into
    // a thrown CosmosException carrying the parsed CosmosError body.
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // FIX(review): decode explicitly as UTF-8 instead of the platform-default charset
            // (the body is a JSON error payload; JSON defaults to UTF-8 — confirm with service docs).
            String body = bodyAsBytes != null
                ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
                : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Dispatches the request to the handler for its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        // applySessionToken is defined elsewhere in this class's source file.
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                // Capture session tokens from selected failures on non-master resources;
                // 404 with READ_SESSION_NOT_AVAILABLE is excluded because its token is stale.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        });
    }

    // Records the session token from a response; deleting a collection clears its tokens instead.
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
            request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
/**
 * Gateway store model: dispatches {@link RxDocumentServiceRequest}s to the Cosmos DB Gateway over
 * HTTPS and converts the HTTP responses into {@link RxDocumentServiceResponse}s, capturing session
 * tokens on the way out/in so SESSION consistency can be honored.
 *
 * Not designed for external extension; thread-safety follows from the immutability of the fields
 * set in the constructor (the session container is expected to be thread-safe).
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request carries its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Query-plan-only requests must not carry the IS_QUERY header.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a Mono which upon subscription issues the HTTP call and emits
     * one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return a cold {@link Mono} emitting the service response or a {@link CosmosException}
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Query-plan and address-refresh calls get their own (shorter) response timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures (e.g. URI construction) through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the default headers with the per-request headers; request headers win.
     * A {@code null} request-header map is treated as empty.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUGFIX: the original dereferenced 'headers' in this loop before its null check below,
        // which made the later 'headers != null' guard dead code.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                // Netty headers reject null values; normalize to the empty string.
                httpHeaders.set(entry.getKey(), entry.getValue() == null ? "" : entry.getValue());
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the physical endpoint for the request and builds the full HTTPS URI.
     *
     * @throws URISyntaxException if the generated path cannot form a valid URI
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media is strong-consistency content: always route to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into a RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation only happens once the customer code subscribes (cold publisher).
     * HTTP error status codes are converted to {@link CosmosException}s; network failures are
     * tagged with a gateway sub-status code.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono} of the converted service response
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            // Normalize an empty body to an empty byte array so the mapping below always runs.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws CosmosException for error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUGFIX: the original dereferenced reactorNettyRequestRecord here
                    // unconditionally even though it was null-checked above — NPE risk.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();

        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Fatal JVM errors and the like are passed through untouched.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures (status code 0 = no HTTP response received).
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts gateway error responses (status >= the error threshold) to {@link CosmosException}s.
     *
     * @throws CosmosException when the status code signals an error
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): uses the platform default charset; gateway bodies are presumably
            // UTF-8 JSON — consider StandardCharsets.UTF_8 (would need an import). TODO confirm.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /** Routes the request to the HTTP-verb-specific helper based on its operation type. */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the invocation in the standard back-off retry policy for web exceptions. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // For 412/409 and "real" 404s (not read-session-unavailable) on non-master
                // resources the response still carries a usable session token — capture it.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                        (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                         dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                         (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                          !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Records (or, on collection delete, clears) the session token for the request's resource.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
                request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
Query plan — good point. Stored procedures I would rather not touch right now: their semantics aren't clear enough to me, and I am not aware of a single scenario in which customers would want to adjust the consistency level for stored procedures individually.
/**
 * Populates, preserves, or strips the {@code x-ms-session-token} request header.
 *
 * A session token is relevant only when the effective consistency is SESSION: either the
 * request explicitly asks for SESSION, or the client default is SESSION and the request is
 * not a read-only document operation downgraded to EVENTUAL. Master resources never carry
 * a session token. A caller-supplied token is kept when applicable, removed otherwise;
 * when no token was supplied and one is applicable, it is resolved from the session container.
 *
 * @param request the outgoing request whose headers are adjusted in place; its header map
 *                must be non-null
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    // Read-only document operation explicitly downgraded to EVENTUAL — the one case where
    // a SESSION client default does not make the session token applicable.
    boolean isEventualDocumentRead =
        request.isReadOnlyRequest()
            && request.getResourceType() == ResourceType.Document
            && Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString());

    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())
            || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && !isEventualDocumentRead);

    boolean mustSuppressToken =
        !sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    String existingToken = headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingToken)) {
        // Caller already supplied a token: strip it when not applicable, otherwise keep it.
        if (mustSuppressToken) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (mustSuppressToken) {
        return;
    }

    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
request.getResourceType() != ResourceType.Document ||
/**
 * Populates, preserves, or strips the {@code x-ms-session-token} request header in place.
 *
 * Applicable only when the effective consistency is SESSION (explicit per-request SESSION, or
 * SESSION client default not downgraded to EVENTUAL on a read-only document operation) and the
 * target is not a master resource. A caller-supplied token is kept when applicable and removed
 * otherwise; absent a supplied token, one is resolved from the session container.
 *
 * @param request the outgoing request; its header map must be non-null
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // Applicable = explicit SESSION, or default SESSION unless this is a read-only document
    // request explicitly downgraded to EVENTUAL (De Morgan form of !(readOnly && doc && eventual)).
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller already supplied a token: strip it when not applicable or on master resources.
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // No token supplied and one is applicable: resolve it from the session container.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway store model: dispatches {@link RxDocumentServiceRequest}s to the Cosmos DB Gateway over
 * HTTPS and converts the HTTP responses into {@link RxDocumentServiceResponse}s, capturing session
 * tokens on the way out/in so SESSION consistency can be honored.
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request carries its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Query-plan-only requests must not carry the IS_QUERY header.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a Mono which upon subscription issues the HTTP call and emits
     * one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return a cold {@link Mono} emitting the service response or a {@link CosmosException}
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Query-plan and address-refresh calls get their own (shorter) response timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures (e.g. URI construction) through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the default headers with the per-request headers; request headers win.
     * A {@code null} request-header map is treated as empty.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUGFIX: the original dereferenced 'headers' in this loop before its null check below,
        // which made the later 'headers != null' guard dead code.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                // Netty headers reject null values; normalize to the empty string.
                httpHeaders.set(entry.getKey(), entry.getValue() == null ? "" : entry.getValue());
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the physical endpoint for the request and builds the full HTTPS URI.
     *
     * @throws URISyntaxException if the generated path cannot form a valid URI
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media is strong-consistency content: always route to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into a RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation only happens once the customer code subscribes (cold publisher).
     * HTTP error status codes are converted to {@link CosmosException}s; network failures are
     * tagged with a gateway sub-status code.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono} of the converted service response
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            // Normalize an empty body to an empty byte array so the mapping below always runs.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws CosmosException for error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUGFIX: the original dereferenced reactorNettyRequestRecord here
                    // unconditionally even though it was null-checked above — NPE risk.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();

        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Fatal JVM errors and the like are passed through untouched.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures (status code 0 = no HTTP response received).
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts gateway error responses (status >= the error threshold) to {@link CosmosException}s.
     *
     * @throws CosmosException when the status code signals an error
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): uses the platform default charset; gateway bodies are presumably
            // UTF-8 JSON — consider StandardCharsets.UTF_8 (would need an import). TODO confirm.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /** Routes the request to the HTTP-verb-specific helper based on its operation type. */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the invocation in the standard back-off retry policy for web exceptions. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // For 412/409 and "real" 404s (not read-session-unavailable) on non-master
                // resources the response still carries a usable session token — capture it.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                        (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                         dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                         (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                          !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Records (or, on collection delete, clears) the session token for the request's resource.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
                request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
/**
 * Gateway (HTTP) store model. Translates {@link RxDocumentServiceRequest}s into
 * HTTPS calls against the Cosmos DB gateway endpoint, applies and captures
 * session tokens, and maps responses/failures to RxDocumentServiceResponse or
 * CosmosException.
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        // Headers sent on every request unless explicitly overridden per call.
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // ---- one HTTP verb per operation type ----------------------------------

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // QueryPlan requests are not flagged as queries; every other query is.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which upon subscription issues the HTTP
     * call and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a cold {@code Mono<RxDocumentServiceResponse>}
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);
            // Query-plan and address-refresh calls get their own (shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers on top of the client defaults.
     * A null header value is coerced to the empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUGFIX: the original dereferenced 'headers' in this loop before the
        // null check below; guard it so a null map cannot NPE here.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /** Resolves the full HTTPS URI for the request (endpoint override wins). */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media writes must target the first write region.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
                       null,
                       rootUri.getHost(),
                       rootUri.getPort(),
                       ensureSlashPrefixed(path),
                       null,
                       null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an
     * RxDocumentServiceResponse Mono. The HTTP invocation only happens once the
     * returned Mono is subscribed.
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUGFIX: the null check above implies the record can be
                    // absent, yet this call previously dereferenced it
                    // unconditionally — guard it to avoid an NPE.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are not translated; propagate as-is.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }
              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }
              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }
              return Mono.error(dce);
          });
    }

    /**
     * Throws a CosmosException when the gateway returned an error status code;
     * otherwise returns normally.
     *
     * @throws CosmosException when statusCode >= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): decodes with the platform default charset; the
            // service body is presumably UTF-8 — confirm before switching to
            // StandardCharsets.UTF_8.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /** Dispatches the request to the HTTP-verb helper for its operation type. */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the dispatch in the gateway retry policy. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // Capture the session token even on selected failures so the
            // session container stays current.
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType()))
                && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED
                    || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT
                    || (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND
                        && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            return Mono.error(dce);
        }).map(response -> {
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        });
    }

    /**
     * Applies the session token header to the outgoing request when session
     * consistency semantics apply; strips a caller-supplied token otherwise.
     * BUGFIX: processMessage calls this method but the class body was missing
     * it — reinstated verbatim.
     */
    private void applySessionToken(RxDocumentServiceRequest request) {
        Map<String, String> headers = request.getHeaders();
        Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
        String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
        boolean sessionTokenApplicable =
            Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())
                || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION
                    && (!request.isReadOnlyRequest()
                        || request.getResourceType() != ResourceType.Document
                        || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
                request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
            }
            return;
        }
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            return;
        }
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
    }

    /**
     * Records (or, for collection deletes, clears) the session token returned
     * in the response headers.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
Fixed in the next iteration.
/**
 * Applies the session token header to the outgoing request when session
 * consistency semantics apply; otherwise strips any caller-supplied token.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    // Session semantics apply when the request asks for SESSION explicitly, or
    // when the client default is SESSION and the request is not an
    // eventual-consistency document read.
    boolean explicitSessionRequest =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString());
    boolean defaultSessionApplies =
        this.defaultConsistencyLevel == ConsistencyLevel.SESSION
            && (!request.isReadOnlyRequest()
                || request.getResourceType() != ResourceType.Document
                || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()));
    boolean sessionTokenApplicable = explicitSessionRequest || defaultSessionApplies;

    // Master resources never carry session tokens.
    boolean tokenMustBeOmitted =
        !sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    if (!Strings.isNullOrEmpty(headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller already set a token: keep it, unless it must be omitted.
        if (tokenMustBeOmitted) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (tokenMustBeOmitted) {
        return;
    }

    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
request.getResourceType() != ResourceType.Document ||
/**
 * Decides whether a session token belongs on this request and sets or strips
 * the SESSION_TOKEN header accordingly.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    // Token applies on an explicit SESSION request, or under a SESSION default
    // unless this is a document read explicitly downgraded to EVENTUAL.
    boolean sessionTokenApplicable;
    if (Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())) {
        sessionTokenApplicable = true;
    } else {
        sessionTokenApplicable =
            this.defaultConsistencyLevel == ConsistencyLevel.SESSION
                && (!request.isReadOnlyRequest()
                    || request.getResourceType() != ResourceType.Document
                    || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()));
    }

    boolean suppressToken =
        !sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    String existingToken = headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingToken)) {
        // Respect the caller-provided token unless it must be suppressed.
        if (suppressToken) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (suppressToken) {
        return;
    }

    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway (HTTP) store model. Translates {@link RxDocumentServiceRequest}s into
 * HTTPS calls against the Cosmos DB gateway endpoint, applies and captures
 * session tokens, and maps responses/failures to RxDocumentServiceResponse or
 * CosmosException.
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        // Headers sent on every request unless explicitly overridden per call.
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // ---- one HTTP verb per operation type ----------------------------------

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // QueryPlan requests are not flagged as queries; every other query is.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which upon subscription issues the HTTP
     * call and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a cold {@code Mono<RxDocumentServiceResponse>}
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);
            // Query-plan and address-refresh calls get their own (shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers on top of the client defaults.
     * A null header value is coerced to the empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUGFIX: the original dereferenced 'headers' in this loop before the
        // null check below; guard it so a null map cannot NPE here.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /** Resolves the full HTTPS URI for the request (endpoint override wins). */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media writes must target the first write region.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
                       null,
                       rootUri.getHost(),
                       rootUri.getPort(),
                       ensureSlashPrefixed(path),
                       null,
                       null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an
     * RxDocumentServiceResponse Mono. The HTTP invocation only happens once the
     * returned Mono is subscribed.
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUGFIX: the null check above implies the record can be
                    // absent, yet this call previously dereferenced it
                    // unconditionally — guard it to avoid an NPE.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are not translated; propagate as-is.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }
              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }
              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }
              return Mono.error(dce);
          });
    }

    /**
     * Throws a CosmosException when the gateway returned an error status code;
     * otherwise returns normally.
     *
     * @throws CosmosException when statusCode >= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): decodes with the platform default charset; the
            // service body is presumably UTF-8 — confirm before switching to
            // StandardCharsets.UTF_8.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Dispatches the request to the HTTP-verb helper for its operation type.
     * CONSISTENCY: the sibling copy of this class in this file routes Batch
     * through create(); aligned here so both copies agree.
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the dispatch in the gateway retry policy. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // Capture the session token even on selected failures so the
            // session container stays current.
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType()))
                && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED
                    || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT
                    || (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND
                        && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            return Mono.error(dce);
        }).map(response -> {
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        });
    }

    /**
     * Applies the session token header to the outgoing request when session
     * consistency semantics apply; strips a caller-supplied token otherwise.
     * BUGFIX: processMessage calls this method but the class body was missing
     * it — reinstated verbatim.
     */
    private void applySessionToken(RxDocumentServiceRequest request) {
        Map<String, String> headers = request.getHeaders();
        Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
        String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
        boolean sessionTokenApplicable =
            Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())
                || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION
                    && (!request.isReadOnlyRequest()
                        || request.getResourceType() != ResourceType.Document
                        || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
                request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
            }
            return;
        }
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            return;
        }
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
    }

    /**
     * Records (or, for collection deletes, clears) the session token returned
     * in the response headers.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
Fixed in next iteration
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB gateway endpoint and converts the HTTP responses back into
 * {@link RxDocumentServiceResponse}s, recording diagnostics and maintaining the session
 * container along the way.
 */
class RxGatewayStoreModel implements RxStoreModel {

    // Sentinel used when an HTTP response carries no body.
    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request itself overrides them.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);

        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());

        // Only pin a default consistency level header when one was configured.
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }

        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query / SQL-query / query-plan request over HTTP POST after setting the
     * query-specific request headers.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if(request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        } else {
            // Query-plan requests are not session scoped; strip any stale session token a
            // caller may have left on the request (defensive - see QueryPlanRetriever).
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }

        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return Flux&lt;RxDocumentServiceResponse&gt;
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();

            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Query-plan and address-refresh requests get their own (shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures (e.g. URI construction) through the Mono.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the client default headers; request headers win.
     * A null header value is sent as an empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Defaults first, but only where the request does not override them.
        // (Null-safe: the original dereferenced 'headers' here before its null check below.)
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the target URI for the request: endpoint override if present, otherwise the
     * first write endpoint for media requests or the globally resolved service endpoint.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // NOTE(review): media requests always go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }

        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Returns the path guaranteed to start with '/'; passes null through unchanged.
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
     *
     * Once the customer code subscribes to the observable returned by the CRUD APIs,
     * the subscription goes up till it reaches the source reactor netty's observable,
     * and at that point the HTTP invocation will be made.
     *
     * @param httpResponseMono the raw HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono}
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            // An absent body is normalized to an empty byte array.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // Guarded: the original dereferenced the record here unconditionally,
                    // NPE-ing whenever the request record was absent.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(
                            request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp,
                            request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (as opposed to Exceptions) are unexpected; pass them through.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures into a CosmosException with status 0.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(
                      request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Throws a {@link CosmosException} built from the response body/headers when the HTTP
     * status code is in the gateway error range; otherwise does nothing.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): decodes with the platform default charset; presumably the
            // gateway always sends UTF-8 - confirm before changing.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Dispatches the request to the handler matching its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);

                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // Capture the session token even on selected failures so the session
                // container stays current (412/409, and 404 unless the sub-status says
                // the read session was not available).
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                     dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                     (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                      !Exceptions.isSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }

                return Mono.error(dce);
            }
        ).map(response -> {
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        });
    }

    /**
     * Updates the session container from the response headers; deleting a collection
     * instead clears all tokens cached for that collection's resource id.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
            request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
The way the query-plan request is constructed, it will never carry a session token, so this removal is a no-op in practice — please see `QueryPlanRetriever`. Still, it doesn't hurt to be defensive here.
/**
 * Issues a query-type request (query, SQL query, or query plan) over HTTP POST.
 * Marks non-plan requests as queries, strips any stale session token from
 * query-plan requests, and sets the content type expected by the configured
 * query compatibility mode before delegating to {@code performRequest}.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    Map<String, String> requestHeaders = request.getHeaders();

    if (request.getOperationType() == OperationType.QueryPlan) {
        // Query-plan requests never carry a session token (see QueryPlanRetriever);
        // drop any stale one defensively.
        requestHeaders.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
    } else {
        requestHeaders.put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    }

    // SqlQuery mode sends raw SQL; Default/Query (and anything else) send query JSON.
    String contentType = this.queryCompatibilityMode == QueryCompatibilityMode.SqlQuery
        ? RuntimeConstants.MediaTypes.SQL
        : RuntimeConstants.MediaTypes.QUERY_JSON;
    requestHeaders.put(HttpConstants.HttpHeaders.CONTENT_TYPE, contentType);

    return this.performRequest(request, HttpMethod.POST);
}
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
/**
 * Issues a query-type request (query, SQL query, or query plan) over HTTP POST,
 * setting the query-specific request headers first.
 *
 * @param request the query / query-plan request
 * @return the service response emitted on subscription
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    if(request.getOperationType() != OperationType.QueryPlan) {
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    } else {
        // Query-plan requests are not session scoped; strip any stale session token a
        // caller may have left on the request (defensive - see QueryPlanRetriever).
        request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
    }

    switch (this.queryCompatibilityMode) {
        case SqlQuery:
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                RuntimeConstants.MediaTypes.SQL);
            break;
        case Default:
        case Query:
        default:
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                RuntimeConstants.MediaTypes.QUERY_JSON);
            break;
    }
    return this.performRequest(request, HttpMethod.POST);
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB gateway endpoint and converts the HTTP responses back into
 * {@link RxDocumentServiceResponse}s, applying and capturing session tokens around each call.
 */
class RxGatewayStoreModel implements RxStoreModel {

    // Sentinel used when an HTTP response carries no body.
    private final static byte[] EMPTY_BYTE_ARRAY = {};

    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request itself overrides them.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        // Only pin a default consistency level header when one was configured.
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return Flux&lt;RxDocumentServiceResponse&gt;
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);
            // Query-plan and address-refresh requests get their own (shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures (e.g. URI construction) through the Mono.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the client default headers; request headers win
     * and a null header value is sent as an empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // NOTE(review): 'headers' is dereferenced in this loop before the null check
        // below, so the later 'headers != null' guard is effectively dead - confirm
        // callers never pass null.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the target URI for the request: endpoint override if present, otherwise the
     * first write endpoint for media requests or the globally resolved service endpoint.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        // Database-account requests target the service root.
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Returns the path guaranteed to start with '/'; passes null through unchanged.
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
     *
     * Once the customer code subscribes to the observable returned by the CRUD APIs,
     * the subscription goes up till it reaches the source reactor netty's observable,
     * and at that point the HTTP invocation will be made.
     *
     * @param httpResponseMono the raw HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono}
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            // An absent body is normalized to an empty byte array.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // NOTE(review): reactorNettyRequestRecord is null-checked above but
                    // dereferenced unconditionally here - NPE if the record is absent.
                    DirectBridgeInternal.setRequestTimeline(rsp,
                        reactorNettyRequestRecord.takeTimelineSnapshot());
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(
                            request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp,
                            request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (as opposed to Exceptions) are unexpected; pass them through.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }
              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures into a CosmosException with status 0.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }
              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(
                      request.requestContext.cosmosDiagnostics, request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }
              return Mono.error(dce);
          });
    }

    /**
     * Throws a {@link CosmosException} built from the response body/headers when the HTTP
     * status code is in the gateway error range; otherwise does nothing.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // NOTE(review): decodes with the platform default charset; presumably the
            // gateway always sends UTF-8 - confirm before changing.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Dispatches the request to the handler matching its operation type.
    // (this.query(...) is not visible in this chunk; presumably defined nearby in the file.)
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                // Capture the session token even on selected failures so the session
                // container stays current (412/409, and 404 unless the sub-status says
                // the read session was not available).
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                     dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                     (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                      !Exceptions.isSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
            this.captureSessionToken(request, response.getResponseHeaders());
            return response;
        });
    }

    /**
     * Updates the session container from the response headers; deleting a collection
     * instead clears all tokens cached for that collection's resource id.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
            request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    /**
     * Applies (or removes) the session token header before the request is sent.
     * The token applies only to session-consistency reads/writes on non-master
     * resources; an explicitly supplied token is kept when applicable, otherwise
     * the globally resolved token from the session container is used.
     */
    private void applySessionToken(RxDocumentServiceRequest request) {
        Map<String, String> headers = request.getHeaders();
        Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
        String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
        boolean sessionTokenApplicable =
            Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                // Document reads can explicitly downgrade to EVENTUAL, which opts out.
                (!request.isReadOnlyRequest() ||
                 request.getResourceType() != ResourceType.Document ||
                 !Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            // Caller supplied a token: strip it when it does not apply, else keep it as-is.
            if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
                request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
            }
            return;
        }
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            return;
        }
        // No caller-supplied token: resolve one from the session container.
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders 
getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", 
exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? 
new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && 
!Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if 
(!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } } }
the code which sets the consistency level in headers, uses `ConsistencyLeve.toString()`. hence the casing will be the same. ignore-case equality is not needed.
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } else { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
The code that sets the consistency level in the headers uses `ConsistencyLevel.toString()`, hence the casing will always be the same; ignore-case equality is not needed.
/**
 * Attaches or strips the session-token header on the outgoing request, depending on
 * whether session consistency applies to it.
 *
 * Rules implemented below:
 * - If the caller already supplied a session token but the request does not qualify
 *   for session consistency (or targets a master/metadata resource), the token is removed.
 * - If no token is present and the request qualifies, the globally resolved session
 *   token from the session container is added.
 *
 * @param request the outgoing service request; its headers map must be non-null.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // The consistency-level header is always written via ConsistencyLevel.toString()
    // (see the constructor populating defaultHeaders), so the casing is fixed and a
    // plain case-sensitive comparison suffices — ignore-case equality is not needed.
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString())
            || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION
                // A read-only document request explicitly downgraded to EVENTUAL
                // does not need a session token.
                && (!request.isReadOnlyRequest()
                    || request.getResourceType() != ResourceType.Document
                    || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller-supplied token: drop it when session consistency does not apply
        // or the target is a master (metadata) resource; otherwise keep it as-is.
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // No token yet and session consistency applies: resolve and attach one.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
!Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
/**
 * Ensures the session-token header on the request matches its consistency
 * requirements: removes a caller-supplied token when session consistency is not
 * in effect (or the target is a master resource), and injects the resolved
 * global session token when it is.
 *
 * @param request the outgoing service request; its headers map must be non-null.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> requestHeaders = request.getHeaders();
    Objects.requireNonNull(requestHeaders, "RxDocumentServiceRequest::headers is required and cannot be null");

    String consistencyLevelHeader = requestHeaders.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);

    // Session consistency is in effect either when the request asks for it
    // explicitly, or when the client default is SESSION and the request is not
    // a read-only document read downgraded to EVENTUAL.
    boolean sessionRequestedExplicitly =
        Strings.areEqual(consistencyLevelHeader, ConsistencyLevel.SESSION.toString());
    boolean sessionByClientDefault =
        this.defaultConsistencyLevel == ConsistencyLevel.SESSION
            && (!request.isReadOnlyRequest()
                || request.getResourceType() != ResourceType.Document
                || !Strings.areEqual(consistencyLevelHeader, ConsistencyLevel.EVENTUAL.toString()));
    boolean sessionTokenApplicable = sessionRequestedExplicitly || sessionByClientDefault;

    // Master (metadata) resources never carry a session token.
    boolean mustNotCarryToken =
        !sessionTokenApplicable
            || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    String existingToken = requestHeaders.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingToken)) {
        // A token was supplied by the caller; strip it when inapplicable,
        // otherwise leave it untouched.
        if (mustNotCarryToken) {
            requestHeaders.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (mustNotCarryToken) {
        return;
    }

    // Applicable and absent: attach the globally resolved session token.
    String resolvedToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(resolvedToken)) {
        requestHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, resolvedToken);
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } else { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
Removed the code
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } else { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); }
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. * * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders 
getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. 
* * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", 
exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? 
new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && 
!Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if 
(!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } } }
/**
 * Gateway-mode store model: translates {@code RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB gateway endpoint and converts the responses back into
 * {@code RxDocumentServiceResponse}s, maintaining the session-token container along the way.
 *
 * <p>NOTE(review): {@code invokeAsyncInternal} dispatches query operations to
 * {@code this.query(...)}, but no {@code query} method is defined in this copy of the class —
 * presumably it exists elsewhere; verify before compiling this chunk in isolation.
 */
class RxGatewayStoreModel implements RxStoreModel {
    // Shared empty body used when an HTTP response carries no content.
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    /**
     * Builds the store model and pre-populates the default header set
     * (cache-control, API version, user agent, and — when configured — the
     * account-default consistency level).
     */
    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
            "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // Thin per-operation wrappers: each maps an operation to its HTTP verb.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Builds the HTTP request (URI, headers, body) and returns a Mono that, upon
     * subscription, issues the HTTP call and emits one RxDocumentServiceResponse.
     * Query-plan and address-refresh requests get their own (shorter) response timeouts.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a Mono emitting the service response, or an error
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                uri,
                uri.getPort(),
                httpHeaders,
                contentAsByteArray);
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous construction failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the model's default headers.
     * Null header values are normalized to the empty string.
     *
     * <p>NOTE(review): {@code headers} is dereferenced ({@code headers.containsKey})
     * before the {@code headers != null} check below, so the null check is ineffective —
     * confirm callers never pass null.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the target URI: an explicit endpoint override wins; otherwise media
     * requests go to the first write endpoint and everything else is resolved by the
     * global endpoint manager. DatabaseAccount requests use the service root path.
     *
     * @throws URISyntaxException if the assembled URI is malformed
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media is not geo-replicated; always use the write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Guarantees the path begins with a single '/' (null passes through unchanged).
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the HTTP response Mono into an RxDocumentServiceResponse Mono.
     * The HTTP call is only issued once the returned Mono is subscribed.
     * Error-status responses are converted to CosmosExceptions (via validateOrThrow);
     * network failures are tagged with gateway sub-status codes and recorded in diagnostics.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request the originating request (for diagnostics and headers)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            // Empty bodies are normalized to a zero-length byte array.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws a CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // NOTE(review): reactorNettyRequestRecord is null-checked above but
                    // dereferenced unconditionally here — possible NPE if it can be null.
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
            .onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                if (!(unwrappedException instanceof Exception)) {
                    // Errors (non-Exception Throwables) are rethrown untouched.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }
                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Wrap transport-level failures as a CosmosException with status 0.
                    logger.error("Network failure", exception);
                    dce = BridgeInternal.createCosmosException(0, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }
                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Distinguish read timeouts from general endpoint unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                    BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
                }
                return Mono.error(dce);
            });
    }

    /**
     * Throws a CosmosException when the status code is in the gateway error range;
     * the error body (when present) is parsed into a CosmosError and the status
     * code string is appended to its message.
     *
     * @throws CosmosException for error status codes (this is the normal exit path
     *     for failed requests; callers rely on the exception type)
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Routes the request to the operation-specific handler based on its OperationType.
     * Batch requests share the Create path (both POST).
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                // NOTE(review): no query(...) method is defined in this copy of the class.
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard backoff/retry policy for web exceptions.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point for the store model: applies the session token to the request,
     * invokes it with retry, and captures the session token from the response —
     * including from selected error responses (412/409, and 404 unless the failure
     * was a read-session-not-available miss).
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (
                            dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                                !Exceptions.isSubStatusCode(dce,
                                    HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Updates the session container from response headers. Deleting a collection
     * instead clears all tokens for that collection's resource id (taken from the
     * OWNER_ID header for name-based requests, otherwise from the request itself).
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    /**
     * Adds (or strips) the session token header before sending. The token applies
     * only when session consistency is in effect and the resource is not a master
     * resource; otherwise any caller-supplied token is removed.
     *
     * <p>NOTE(review): Strings.areEqual compares the consistency-level header value
     * case-sensitively; other copies of this method in this file use
     * areEqualIgnoreCase — confirm which is intended.
     */
    private void applySessionToken(RxDocumentServiceRequest request) {
        Map<String, String> headers = request.getHeaders();
        Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
        String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
        boolean sessionTokenApplicable =
            Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
                (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                    (!request.isReadOnlyRequest() ||
                        request.getResourceType() != ResourceType.Document ||
                        !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            if (!sessionTokenApplicable
                || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
                request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
            }
            return;
        }
        if (!sessionTokenApplicable
            || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            return;
        }
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
    }
}
ACK
/**
 * Applies the session token header to the outgoing request, or strips it when it
 * does not apply. A session token is applicable only when session consistency is
 * requested (explicitly via the consistency-level header, compared case-insensitively,
 * or via the account default — unless a document read explicitly downgrades to
 * eventual) and the target is not a master resource.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> requestHeaders = request.getHeaders();
    Objects.requireNonNull(requestHeaders, "RxDocumentServiceRequest::headers is required and cannot be null");

    String levelHeader = requestHeaders.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // Explicit opt-in via the request header (case-insensitive).
    boolean explicitlySession =
        Strings.areEqualIgnoreCase(levelHeader, ConsistencyLevel.SESSION.toString());
    // Account default is session, and the request didn't downgrade a document read to eventual.
    boolean sessionByDefault =
        this.defaultConsistencyLevel == ConsistencyLevel.SESSION
            && (!request.isReadOnlyRequest()
                || request.getResourceType() != ResourceType.Document
                || !Strings.areEqualIgnoreCase(levelHeader, ConsistencyLevel.EVENTUAL.toString()));
    boolean sessionTokenApplicable = explicitlySession || sessionByDefault;
    // Master resources never carry a session token.
    boolean mustStrip = !sessionTokenApplicable
        || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType());

    String existingToken = requestHeaders.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingToken)) {
        // Caller supplied a token: drop it when inapplicable, otherwise keep it as-is.
        if (mustStrip) {
            requestHeaders.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (mustStrip) {
        return;
    }
    // No caller-supplied token: resolve one from the session container.
    String resolvedToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(resolvedToken)) {
        requestHeaders.put(HttpConstants.HttpHeaders.SESSION_TOKEN, resolvedToken);
    }
}
!Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
/**
 * Applies the session token header to the outgoing request, or strips it when it
 * does not apply. A session token is applicable only when session consistency is
 * in effect (requested explicitly via the consistency-level header, or implied by
 * the account default unless a document read downgrades to eventual) and the
 * target is not a master resource.
 *
 * <p>Fix: the consistency-level header value is now compared case-insensitively
 * ({@code Strings.areEqualIgnoreCase} instead of {@code Strings.areEqual}). HTTP
 * header values such as "session"/"Session" would previously fail the comparison,
 * causing a valid session token to be stripped (or never resolved), silently
 * weakening read guarantees.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // Case-insensitive match: header values may arrive in any casing.
    boolean sessionTokenApplicable =
        Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token: remove it when inapplicable or targeting a master resource.
        if (!sessionTokenApplicable
            || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable
        || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // No caller-supplied token: resolve one from the session container.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway-mode store model: translates {@code RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB gateway endpoint and converts the responses back into
 * {@code RxDocumentServiceResponse}s, maintaining the session-token container along the way.
 *
 * <p>NOTE(review): {@code processMessage} calls {@code this.applySessionToken(...)},
 * but no such method is defined in this copy of the class — presumably it exists
 * elsewhere; verify before compiling this chunk in isolation.
 */
class RxGatewayStoreModel implements RxStoreModel {
    // Shared empty body used when an HTTP response carries no content.
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    /**
     * Builds the store model and pre-populates the default header set
     * (cache-control, API version, user agent, and — when configured — the
     * account-default consistency level).
     */
    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
            "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // Thin per-operation wrappers: each maps an operation to its HTTP verb.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Prepares and issues a query request: marks it as a query (except for
     * query-plan requests, whose session token is stripped instead) and sets the
     * content type according to the configured query-compatibility mode.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        } else {
            // Query-plan requests do not carry a session token.
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Builds the HTTP request (URI, headers, body) and returns a Mono that, upon
     * subscription, issues the HTTP call and emits one RxDocumentServiceResponse.
     * Query-plan and address-refresh requests get their own (shorter) response timeouts.
     *
     * @param request the service request to send
     * @param method the HTTP verb to use
     * @return a Mono emitting the service response, or an error
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                uri,
                uri.getPort(),
                httpHeaders,
                contentAsByteArray);
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous construction failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the model's default headers.
     * Null header values are normalized to the empty string.
     *
     * <p>NOTE(review): {@code headers} is dereferenced ({@code headers.containsKey})
     * before the {@code headers != null} check below, so the null check is ineffective —
     * confirm callers never pass null.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the target URI: an explicit endpoint override wins; otherwise media
     * requests go to the first write endpoint and everything else is resolved by the
     * global endpoint manager. DatabaseAccount requests use the service root path.
     *
     * @throws URISyntaxException if the assembled URI is malformed
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media is not geo-replicated; always use the write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Guarantees the path begins with a single '/' (null passes through unchanged).
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the HTTP response Mono into an RxDocumentServiceResponse Mono.
     * The HTTP call is only issued once the returned Mono is subscribed.
     * Error-status responses are converted to CosmosExceptions (via validateOrThrow);
     * network failures are tagged with gateway sub-status codes and recorded in diagnostics.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request the originating request (for diagnostics and headers)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            // Empty bodies are normalized to a zero-length byte array.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws a CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // NOTE(review): reactorNettyRequestRecord is null-checked above but
                    // dereferenced unconditionally here — possible NPE if it can be null.
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
            .onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                if (!(unwrappedException instanceof Exception)) {
                    // Errors (non-Exception Throwables) are rethrown untouched.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }
                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Wrap transport-level failures as a CosmosException with status 0.
                    logger.error("Network failure", exception);
                    dce = BridgeInternal.createCosmosException(0, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }
                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Distinguish read timeouts from general endpoint unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                    BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
                }
                return Mono.error(dce);
            });
    }

    /**
     * Throws a CosmosException when the status code is in the gateway error range;
     * the error body (when present) is parsed into a CosmosError and the status
     * code string is appended to its message.
     *
     * @throws CosmosException for error status codes (this is the normal exit path
     *     for failed requests; callers rely on the exception type)
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Routes the request to the operation-specific handler based on its OperationType.
     *
     * <p>NOTE(review): unlike the other copy of this class in this file, this switch
     * has no {@code Batch} case — confirm whether that is intentional for this version.
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard backoff/retry policy for web exceptions.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point for the store model: applies the session token to the request,
     * invokes it with retry, and captures the session token from the response —
     * including from selected error responses (412/409, and 404 unless the failure
     * was a read-session-not-available miss).
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (
                            dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                                !Exceptions.isSubStatusCode(dce,
                                    HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Updates the session container from response headers. Deleting a collection
     * instead clears all tokens for that collection's resource id (taken from the
     * OWNER_ID header for name-based requests, otherwise from the request itself).
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTP calls
 * against the Cosmos DB gateway endpoint and maps the HTTP responses back into
 * {@link RxDocumentServiceResponse}s, capturing/applying session tokens along the way.
 *
 * Review fixes in this revision:
 *  - toDocumentServiceResponse: guarded the second use of reactorNettyRequestRecord with the
 *    same null check as the first use (previously an unconditional dereference -> NPE when the
 *    transport did not attach a request record).
 *  - getHttpRequestHeaders: the first loop dereferenced 'headers' before the null check that the
 *    second loop performed; the null check now covers both loops.
 *  - validateOrThrow: the error body is decoded explicitly as UTF-8 instead of the platform
 *    default charset (the gateway returns UTF-8 JSON).
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // QueryPlan requests are not marked as queries; everything else is.
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a flux which upon subscription issues the HTTP call
     * and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return a Mono emitting the service response (or a CosmosException on failure)
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);
            // Pick the response timeout based on the kind of request.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the client defaults. Request headers win;
     * null header values are sent as empty strings.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Fix: check 'headers' for null before dereferencing it (the original null check
        // only covered the second loop, after containsKey had already been called).
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /** Resolves the full request URI: endpoint override, media write endpoint, or resolved service endpoint. */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media writes must go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Mono to an RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation only happens once the downstream subscriber subscribes; network and
     * protocol failures are mapped to CosmosException with gateway sub-status codes.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating service request
     * @return a Mono emitting the mapped service response
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    validateOrThrow(request,
                        HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders,
                        content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // Bug fix: the original called takeTimelineSnapshot() here unconditionally,
                    // throwing NPE whenever reactorNettyRequestRecord was null; guard it with
                    // the same null check used above.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp,
                            request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Fatal throwables (Error etc.) are propagated untouched.
                  logger.error("Unexpected failure {}",
                      unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }
              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures into a CosmosException.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }
              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                      request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce,
                      request.requestContext.cosmosDiagnostics);
              }
              return Mono.error(dce);
          });
    }

    /**
     * Throws a CosmosException when the HTTP status indicates a gateway error
     * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // Fix: decode the error body explicitly as UTF-8 (the gateway returns UTF-8 JSON)
            // instead of relying on the platform default charset.
            String body = bodyAsBytes != null
                ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
                : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /** Dispatches the request to the HTTP handler matching its operation type. */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the dispatch in the web-exception retry policy. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                // Capture session tokens from error responses too, except for master
                // resources and session-unavailable NOTFOUNDs.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Records the response's session token, or clears the collection's tokens when the
     * collection itself was deleted.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
Fixed: in applySessionToken, switched the consistency-level comparisons from the case-insensitive `Strings.areEqualIgnoreCase` to the case-sensitive `Strings.areEqual` for both the SESSION and EVENTUAL checks.
/**
 * Applies or strips the session-token header on an outgoing request.
 *
 * The token is applicable when the request explicitly asks for SESSION consistency
 * (case-insensitive compare), or when the client default is SESSION and the request is
 * not a document read that explicitly downgraded to EVENTUAL. Master resources never
 * carry a session token.
 *
 * @param request the outgoing request; must carry a non-null headers map
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // NOTE(review): precedence matters here — the default-SESSION branch only suppresses the
    // token for read-only Document requests that explicitly requested EVENTUAL.
    boolean sessionTokenApplicable =
        Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    // If the caller already set a session token, keep it only when applicable;
    // otherwise remove it. Either way, do not overwrite a caller-provided token.
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    // No caller token: skip entirely when not applicable or for master resources.
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // Resolve the tracked global session token and attach it when present.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
Strings.areEqualIgnoreCase(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
/**
 * Applies or strips the session-token header on an outgoing request.
 *
 * The token is applicable when the request explicitly asks for SESSION consistency
 * (case-sensitive {@code Strings.areEqual} compare in this variant), or when the client
 * default is SESSION and the request is not a document read that explicitly downgraded
 * to EVENTUAL. Master resources never carry a session token.
 *
 * @param request the outgoing request; must carry a non-null headers map
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    // NOTE(review): precedence matters here — the default-SESSION branch only suppresses the
    // token for read-only Document requests that explicitly requested EVENTUAL.
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    // If the caller already set a session token, keep it only when applicable;
    // otherwise remove it. Either way, do not overwrite a caller-provided token.
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    // No caller token: skip entirely when not applicable or for master resources.
    if (!sessionTokenApplicable || ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) {
        return;
    }
    // Resolve the tracked global session token and attach it when present.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTP calls
 * against the Cosmos DB gateway endpoint and maps the HTTP responses back into
 * {@link RxDocumentServiceResponse}s, capturing/applying session tokens along the way.
 *
 * This variant strips the session token from QueryPlan requests in {@code query()}.
 *
 * Review fixes in this revision:
 *  - toDocumentServiceResponse: guarded the second use of reactorNettyRequestRecord with the
 *    same null check as the first use (previously an unconditional dereference -> NPE when the
 *    transport did not attach a request record).
 *  - getHttpRequestHeaders: the first loop dereferenced 'headers' before the null check that the
 *    second loop performed; the null check now covers both loops.
 *  - validateOrThrow: the error body is decoded explicitly as UTF-8 instead of the platform
 *    default charset (the gateway returns UTF-8 JSON).
 */
class RxGatewayStoreModel implements RxStoreModel {

    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every outgoing request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        } else {
            // QueryPlan requests must not carry a session token.
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a flux which upon subscription issues the HTTP call
     * and emits one RxDocumentServiceResponse.
     *
     * @param request the service request to send
     * @param method  the HTTP verb to use
     * @return a Mono emitting the service response (or a CosmosException on failure)
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);
            // Pick the response timeout based on the kind of request.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the client defaults. Request headers win;
     * null header values are sent as empty strings.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Fix: check 'headers' for null before dereferencing it (the original null check
        // only covered the second loop, after containsKey had already been called).
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /** Resolves the full request URI: endpoint override, media write endpoint, or resolved service endpoint. */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media writes must go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Mono to an RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation only happens once the downstream subscriber subscribes; network and
     * protocol failures are mapped to CosmosException with gateway sub-status codes.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating service request
     * @return a Mono emitting the mapped service response
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    validateOrThrow(request,
                        HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders,
                        content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // Bug fix: the original called takeTimelineSnapshot() here unconditionally,
                    // throwing NPE whenever reactorNettyRequestRecord was null; guard it with
                    // the same null check used above.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp,
                            request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Fatal throwables (Error etc.) are propagated untouched.
                  logger.error("Unexpected failure {}",
                      unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }
              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap transport-level failures into a CosmosException.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }
              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }
              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                      request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce,
                      request.requestContext.cosmosDiagnostics);
              }
              return Mono.error(dce);
          });
    }

    /**
     * Throws a CosmosException when the HTTP status indicates a gateway error
     * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            // Fix: decode the error body explicitly as UTF-8 (the gateway returns UTF-8 JSON)
            // instead of relying on the platform default charset.
            String body = bodyAsBytes != null
                ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
                : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /** Dispatches the request to the HTTP handler matching its operation type. */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
                return this.create(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    /** Wraps the dispatch in the web-exception retry policy. */
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                // Capture session tokens from error responses too, except for master
                // resources and session-unavailable NOTFOUNDs.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Records the response's session token, or clears the collection's tokens when the
     * collection itself was deleted.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> 
read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = 
this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, 
reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? 
new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == 
HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } }
is there way to make this part of code resuable?
protected Manager(HttpPipeline httpPipeline, AzureProfile profile, InnerT innerManagementClient) { this.httpPipeline = httpPipeline; if (httpPipeline != null) { ResourceManager.Configurable resourceConfigurable = ResourceManager.configure(); withInternalHttpPipeline(resourceConfigurable, httpPipeline); this.resourceManager = resourceConfigurable.authenticate(null, profile).withDefaultSubscription(); } this.subscriptionId = profile.getSubscriptionId(); this.environment = profile.getEnvironment(); this.innerManagementClient = innerManagementClient; }
ResourceManager.Configurable resourceConfigurable = ResourceManager.configure();
protected Manager(HttpPipeline httpPipeline, AzureProfile profile, InnerT innerManagementClient) { this.httpPipeline = httpPipeline; if (httpPipeline != null) { this.resourceManager = AzureConfigurableImpl .configureHttpPipeline(httpPipeline, ResourceManager.configure()) .authenticate(null, profile) .withDefaultSubscription(); } this.subscriptionId = profile.getSubscriptionId(); this.environment = profile.getEnvironment(); this.innerManagementClient = innerManagementClient; }
class Manager<InnerT> implements HasServiceClient<InnerT> { private ResourceManager resourceManager; private final String subscriptionId; private final AzureEnvironment environment; private final HttpPipeline httpPipeline; private final InnerT innerManagementClient; @Override public InnerT serviceClient() { return this.innerManagementClient; } /** * @return the ID of the subscription the manager is working with */ public String subscriptionId() { return this.subscriptionId; } /** * @return the Azure environment the manager is working with */ public AzureEnvironment environment() { return this.environment; } protected final void withResourceManager(ResourceManager resourceManager) { this.resourceManager = resourceManager; } protected void withInternalHttpPipeline(AzureConfigurable<?> azureConfigurable, HttpPipeline httpPipeline) { ((AzureConfigurableImpl) azureConfigurable).withInternalHttpPipeline(httpPipeline); } /** * @return the {@link ResourceManager} associated with this manager */ public ResourceManager resourceManager() { return this.resourceManager; } /** * @return the {@link HttpPipeline} associated with this manager */ public HttpPipeline httpPipeline() { return this.httpPipeline; } }
class Manager<InnerT> implements HasServiceClient<InnerT> { private ResourceManager resourceManager; private final String subscriptionId; private final AzureEnvironment environment; private final HttpPipeline httpPipeline; private final InnerT innerManagementClient; @Override public InnerT serviceClient() { return this.innerManagementClient; } /** * @return the ID of the subscription the manager is working with */ public String subscriptionId() { return this.subscriptionId; } /** * @return the Azure environment the manager is working with */ public AzureEnvironment environment() { return this.environment; } protected final void withResourceManager(ResourceManager resourceManager) { this.resourceManager = resourceManager; } /** * @return the {@link ResourceManager} associated with this manager */ public ResourceManager resourceManager() { return this.resourceManager; } /** * @return the {@link HttpPipeline} associated with this manager */ public HttpPipeline httpPipeline() { return this.httpPipeline; } }
The method `withInternalHttpPipeline` can be reused by each manager. But for `.authenticate`, it has to be called in each manager itself.
protected Manager(HttpPipeline httpPipeline, AzureProfile profile, InnerT innerManagementClient) { this.httpPipeline = httpPipeline; if (httpPipeline != null) { ResourceManager.Configurable resourceConfigurable = ResourceManager.configure(); withInternalHttpPipeline(resourceConfigurable, httpPipeline); this.resourceManager = resourceConfigurable.authenticate(null, profile).withDefaultSubscription(); } this.subscriptionId = profile.getSubscriptionId(); this.environment = profile.getEnvironment(); this.innerManagementClient = innerManagementClient; }
ResourceManager.Configurable resourceConfigurable = ResourceManager.configure();
protected Manager(HttpPipeline httpPipeline, AzureProfile profile, InnerT innerManagementClient) { this.httpPipeline = httpPipeline; if (httpPipeline != null) { this.resourceManager = AzureConfigurableImpl .configureHttpPipeline(httpPipeline, ResourceManager.configure()) .authenticate(null, profile) .withDefaultSubscription(); } this.subscriptionId = profile.getSubscriptionId(); this.environment = profile.getEnvironment(); this.innerManagementClient = innerManagementClient; }
class Manager<InnerT> implements HasServiceClient<InnerT> { private ResourceManager resourceManager; private final String subscriptionId; private final AzureEnvironment environment; private final HttpPipeline httpPipeline; private final InnerT innerManagementClient; @Override public InnerT serviceClient() { return this.innerManagementClient; } /** * @return the ID of the subscription the manager is working with */ public String subscriptionId() { return this.subscriptionId; } /** * @return the Azure environment the manager is working with */ public AzureEnvironment environment() { return this.environment; } protected final void withResourceManager(ResourceManager resourceManager) { this.resourceManager = resourceManager; } protected void withInternalHttpPipeline(AzureConfigurable<?> azureConfigurable, HttpPipeline httpPipeline) { ((AzureConfigurableImpl) azureConfigurable).withInternalHttpPipeline(httpPipeline); } /** * @return the {@link ResourceManager} associated with this manager */ public ResourceManager resourceManager() { return this.resourceManager; } /** * @return the {@link HttpPipeline} associated with this manager */ public HttpPipeline httpPipeline() { return this.httpPipeline; } }
class Manager<InnerT> implements HasServiceClient<InnerT> { private ResourceManager resourceManager; private final String subscriptionId; private final AzureEnvironment environment; private final HttpPipeline httpPipeline; private final InnerT innerManagementClient; @Override public InnerT serviceClient() { return this.innerManagementClient; } /** * @return the ID of the subscription the manager is working with */ public String subscriptionId() { return this.subscriptionId; } /** * @return the Azure environment the manager is working with */ public AzureEnvironment environment() { return this.environment; } protected final void withResourceManager(ResourceManager resourceManager) { this.resourceManager = resourceManager; } /** * @return the {@link ResourceManager} associated with this manager */ public ResourceManager resourceManager() { return this.resourceManager; } /** * @return the {@link HttpPipeline} associated with this manager */ public HttpPipeline httpPipeline() { return this.httpPipeline; } }
If this changed, we might able to delete a few overload in `TestUtilities` about random rg name and random guid.
public void testManageUsersGroupsAndRoles() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageUsersGroupsAndRoles.runSample(azureResourceManager, profile)); }
Assertions.assertTrue(ManageUsersGroupsAndRoles.runSample(azureResourceManager, profile));
public void testManageUsersGroupsAndRoles() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageUsersGroupsAndRoles.runSample(azureResourceManager, profile)); }
class GraphRbacTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private AzureProfile profile; @Test @DoNotRecord @Test @DoNotRecord public void testManageServicePrincipalCredentials() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageServicePrincipalCredentials.runSample(azureResourceManager, profile)); } @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); this.profile = profile; } @Override protected void cleanUpResources() { } }
class GraphRbacTests extends ResourceManagerTestBase { private AzureResourceManager azureResourceManager; private AzureProfile profile; @Test @DoNotRecord @Test @DoNotRecord public void testManageServicePrincipalCredentials() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageServicePrincipalCredentials.runSample(azureResourceManager, profile)); } @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { azureResourceManager = buildManager(AzureResourceManager.class, httpPipeline, profile); this.profile = profile; } @Override protected void cleanUpResources() { } }
same comment about moving validation not to build method.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, subQueue); validateAndThrow(prefetchCount); validateAndThrow(maxAutoLockRenewDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, maxAutoLockRenewDuration); }
validateAndThrow(maxAutoLockRenewDuration);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. 
Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
* @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws IllegalArgumentException { */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. 
For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. 
<b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. 
*/ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
Should move validation to the maxAutoLockRenewDuration setter.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount); validateAndThrow(maxAutoLockRenewDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, maxAutoLockRenewDuration, sessionManager); }
validateAndThrow(maxAutoLockRenewDuration);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. 
Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
* @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws IllegalArgumentException { */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. 
For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. 
<b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. 
*/ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
Same with this one. I'm not sure if Yijun fixed this too.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); validateAndThrow(prefetchCount); validateAndThrow(maxAutoLockRenewDuration); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, maxAutoLockRenewDuration, sessionManager); }
validateAndThrow(prefetchCount);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, sessionId, isRollingSessionReceiver(), maxConcurrentSessions, maxAutoLockRenewDuration); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. 
Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
* @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws IllegalArgumentException { */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. */ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. 
For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the session id. * * @param sessionId session id. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder sessionId(String sessionId) { this.sessionId = sessionId; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. 
<b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading * {@link ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusReceiverClient buildClient() { return new ServiceBusReceiverClient(buildAsyncClient(), retryOptions.getTryTimeout()); } /** * This is a rolling session receiver only if maxConcurrentSessions is > 0 AND sessionId is null or empty. If * there is a sessionId, this is going to be a single, named session receiver. * * @return {@code true} if this is an unnamed rolling session receiver; {@code false} otherwise. 
*/ private boolean isRollingSessionReceiver() { if (maxConcurrentSessions == null) { return false; } if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } return CoreUtils.isNullOrEmpty(sessionId); } }
a zero length array. I am checking for 0 length also here.
public static BinaryData fromString(String data) { if (Objects.isNull(data)) { return new BinaryData(EMPTY_DATA); } else { return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } }
return new BinaryData(data.getBytes(StandardCharsets.UTF_8));
public static BinaryData fromString(String data) { if (Objects.isNull(data) || data.length() == 0) { return EMPTY_DATA; } return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static JsonSerializer defaultJsonSerializer; private static byte[] EMPTY_DATA = new byte[0]; private final byte[] data; /** * Create instance of {@link BinaryData} given the data. If {@code null} value is provided , it will be converted * into empty byte array. * @param data to represent as bytes. */ BinaryData(byte[] data) { if (Objects.isNull(data)) { data = EMPTY_DATA; } this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException if {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException if {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * empty byte array. 
* * @param data to use. * @return {@link BinaryData} representing binary data. */ /** * Create {@link BinaryData} instance with given byte array data. If {@code null} value is provided , it will be * converted into empty byte array. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using json serializer which is available in classpath. * The serializer must implement {@link JsonSerializer} interface. A singleton instance of {@link JsonSerializer} * is kept for this class to use. If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @throws IllegalStateException If a {@link JsonSerializer} cannot be found on the classpath. * @return {@link BinaryData} representing binary data. * * @see JsonSerializer */ public static BinaryData fromObject(Object data) { if (Objects.isNull(data)) { return new BinaryData(EMPTY_DATA); } final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); loadDefaultSerializer(); defaultJsonSerializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code serializer} is null. * @return {@link BinaryData} representing binary data. 
*/ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { if (Objects.isNull(data)) { return new BinaryData(EMPTY_DATA); } Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException if {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. The serializer must implement {@link JsonSerializer} interface. * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); loadDefaultSerializer(); return defaultJsonSerializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the Json * serializer found on classpath. 
* * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException if {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return Mono.fromCallable(() -> toObject(clazz)); } private static void loadDefaultSerializer() { if (defaultJsonSerializer == null) { defaultJsonSerializer = JsonSerializerProviders.createInstance(); } } }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final BinaryData EMPTY_DATA = new BinaryData(new byte[0]); private static final Object LOCK = new Object(); private final byte[] data; private static volatile JsonSerializer defaultJsonSerializer; /** * Create an instance of {@link BinaryData} from the given data. * * @param data to represent as bytes. */ BinaryData(byte[] data) { this.data = data; } /** * Creates a {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} * is not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromStream(InputStream inputStream) { if (Objects.isNull(inputStream)) { return EMPTY_DATA; } final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } dataOutputBuffer.flush(); return new BinaryData(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Asynchronously creates a {@link BinaryData} instance with the given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. If the {@link InputStream} is {@code null}, an empty * {@link BinaryData} will be returned. * * @param inputStream to read bytes from. * @return {@link Mono} of {@link BinaryData} representing the binary data. 
*/ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Creates a {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. If the {@link Flux} is {@code null}, an * empty {@link BinaryData} will be returned. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return Mono.just(EMPTY_DATA); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(new BinaryData(bytes))); } /** * Creates a {@link BinaryData} instance with given data. The {@link String} is converted into bytes using UTF_8 * character set. If the String is {@code null}, an empty {@link BinaryData} will be returned. * * @param data to use. * @return {@link BinaryData} representing binary data. */ /** * Creates a {@link BinaryData} instance with given byte array data. If the byte array is {@code null}, an empty * {@link BinaryData} will be returned. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { if (Objects.isNull(data) || data.length == 0) { return EMPTY_DATA; } return new BinaryData(Arrays.copyOf(data, data.length)); } /** * Serialize the given {@link Object} into {@link BinaryData} using json serializer which is available on classpath. * The serializer on classpath must implement {@link JsonSerializer} interface. If the given Object is {@code null}, * an empty {@link BinaryData} will be returned. 
* <p><strong>Code sample</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from.default.serializer * @param data The {@link Object} which needs to be serialized into bytes. * @throws IllegalStateException If a {@link JsonSerializer} cannot be found on the classpath. * @return {@link BinaryData} representing the JSON serialized object. * * @see JsonSerializer * @see <a href="ObjectSerializer" target="_blank">More about serialization</a> */ public static BinaryData fromObject(Object data) { if (Objects.isNull(data)) { return EMPTY_DATA; } final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); getDefaultSerializer().serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * If the Object is {@code null}, an empty {@link BinaryData} will be returned. * <p>You can provide your custom implementation of {@link ObjectSerializer} interface or use one provided in azure * sdk by adding them as dependency. These implementations could be found at * <a href="https: * and <a href="https: * * <p><strong>Create an instance from Object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException If {@code serializer} is null. * @return {@link BinaryData} representing binary data. 
* @see ObjectSerializer * @see JsonSerializer * @see <a href="https: */ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { if (Objects.isNull(data)) { return EMPTY_DATA; } Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. If the Object is {@code null}, an empty {@link BinaryData} will be returned. * * <p>You can provide your custom implementation of {@link ObjectSerializer} interface or use one provided in azure * sdk by adding them as dependency. These implementations could be found at * <a href="https: * and <a href="https: * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException If {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. * @see ObjectSerializer * @see <a href="https: */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { if (Objects.isNull(serializer)) { return monoError(LOGGER, new NullPointerException("'serializer' cannot be null.")); } return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using the UTF-8 character set. * * @return {@link String} representation of the data. 
*/ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * <p>You can provide your custom implementation of {@link ObjectSerializer} interface or use one provided in azure * sdk by adding them as dependency. These implementations could be found at * <a href="https: * and <a href="https: * * <p><strong>Code sample</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException If {@code serializer} or {@code clazz} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserializing the bytes into the {@link Object} of given type after applying the * provided {@link ObjectSerializer} on the {@link BinaryData}. * * <p>You can provide your custom implementation of {@link ObjectSerializer} interface or use one provided in azure * sdk by adding them as dependency. These implementations could be found at * <a href="https: * and <a href="https: * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. 
* @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { if (Objects.isNull(clazz)) { return monoError(LOGGER, new NullPointerException("'clazz' cannot be null.")); } else if (Objects.isNull(serializer)) { return monoError(LOGGER, new NullPointerException("'serializer' cannot be null.")); } return Mono.fromCallable(() -> toObject(clazz, serializer)); } /** * Deserialize the bytes into the {@link Object} of given type by using json serializer which is available in * classpath. The serializer must implement {@link JsonSerializer} interface. A singleton instance of * {@link JsonSerializer} is kept for this class to use. * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException If {@code clazz} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> T toObject(Class<T> clazz) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return getDefaultSerializer().deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserializing the bytes into the {@link Object} of given type after applying the Json * serializer found on classpath. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException If {@code clazz} is null. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> Mono<T> toObjectAsync(Class<T> clazz) { if (Objects.isNull(clazz)) { return monoError(LOGGER, new NullPointerException("'clazz' cannot be null.")); } return Mono.fromCallable(() -> toObject(clazz)); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /* This will ensure lazy instantiation to avoid hard dependency on Json Serializer. */ private static JsonSerializer getDefaultSerializer() { if (defaultJsonSerializer == null) { synchronized (LOCK) { if (defaultJsonSerializer == null) { defaultJsonSerializer = JsonSerializerProviders.createInstance(); } } } return defaultJsonSerializer; } }
new up a static final instance of this BinaryData so that you don't need to do it every time, and just reuse that.
public static BinaryData fromString(String data) { if (Objects.isNull(data) || data.length() == 0) { return new BinaryData(EMPTY_DATA); } else { return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); } }
return new BinaryData(EMPTY_DATA);
public static BinaryData fromString(String data) { if (Objects.isNull(data) || data.length() == 0) { return EMPTY_DATA; } return new BinaryData(data.getBytes(StandardCharsets.UTF_8)); }
class BinaryData { private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class); private static final byte[] EMPTY_DATA = new byte[0]; private static JsonSerializer defaultJsonSerializer; private final byte[] data; /** * Create instance of {@link BinaryData} given the data. If {@code null} value is provided , it will be converted * into empty byte array. * @param data to represent as bytes. */ BinaryData(byte[] data) { if (Objects.isNull(data) || data.length == 0) { data = EMPTY_DATA; } this.data = Arrays.copyOf(data, data.length); } /** * Provides {@link InputStream} for the data represented by this {@link BinaryData} object. * * <p><strong>Get InputStream from BinaryData</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @return {@link InputStream} representing the binary data. */ public InputStream toStream() { return new ByteArrayInputStream(this.data); } /** * Create {@link BinaryData} instance with given {@link InputStream} as source of data. The {@link InputStream} is * not closed by this function. * * <p><strong>Create an instance from InputStream</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param inputStream to read bytes from. * @throws UncheckedIOException If any error in reading from {@link InputStream}. * @throws NullPointerException If {@code inputStream} is null. * @return {@link BinaryData} representing the binary data. 
*/ public static BinaryData fromStream(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); final int bufferSize = 1024; try { ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream(); int nRead; byte[] data = new byte[bufferSize]; while ((nRead = inputStream.read(data, 0, data.length)) != -1) { dataOutputBuffer.write(data, 0, nRead); } return fromBytes(dataOutputBuffer.toByteArray()); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } } /** * Asynchronously create {@link BinaryData} instance with given {@link InputStream} as source of data. The * {@link InputStream} is not closed by this function. * * @param inputStream to read bytes from. * @throws NullPointerException If {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) { Objects.requireNonNull(inputStream, "'inputStream' cannot be null."); return Mono.fromCallable(() -> fromStream(inputStream)); } /** * Create {@link BinaryData} instance with given {@link Flux} of {@link ByteBuffer} as source of data. It will * collect all the bytes from {@link ByteBuffer} into {@link BinaryData}. * * <p><strong>Create an instance from String</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.from * * @param data to use. * @throws NullPointerException If {@code inputStream} is null. * @return {@link Mono} of {@link BinaryData} representing binary data. */ public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) { if (Objects.isNull(data)) { return monoError(LOGGER, new NullPointerException("'data' cannot be null.")); } return FluxUtil.collectBytesInByteBufferStream(data) .flatMap(bytes -> Mono.just(fromBytes(bytes))); } /** * Create {@link BinaryData} instance with given data. The {@link String} is converted into bytes using * {@link StandardCharsets * empty byte array. 
* * @param data to use. * @return {@link BinaryData} representing binary data. */ /** * Create {@link BinaryData} instance with given byte array data. If {@code null} value is provided , it will be * converted into empty byte array. * * @param data to use. * @return {@link BinaryData} representing the binary data. */ public static BinaryData fromBytes(byte[] data) { return new BinaryData(data); } /** * Serialize the given {@link Object} into {@link BinaryData} using json serializer which is available in classpath. * The serializer must implement {@link JsonSerializer} interface. A singleton instance of {@link JsonSerializer} * is kept for this class to use. If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @throws IllegalStateException If a {@link JsonSerializer} cannot be found on the classpath. * @return {@link BinaryData} representing binary data. * * @see JsonSerializer */ public static BinaryData fromObject(Object data) { if (Objects.isNull(data)) { return new BinaryData(EMPTY_DATA); } final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); getDefaultSerializer().serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link BinaryData} using the provided {@link ObjectSerializer}. * If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException If {@code serializer} is null. * @return {@link BinaryData} representing binary data. 
*/ public static BinaryData fromObject(Object data, ObjectSerializer serializer) { if (Objects.isNull(data)) { return new BinaryData(EMPTY_DATA); } Objects.requireNonNull(serializer, "'serializer' cannot be null."); final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); serializer.serialize(outputStream, data); return new BinaryData(outputStream.toByteArray()); } /** * Serialize the given {@link Object} into {@link Mono} {@link BinaryData} using the provided * {@link ObjectSerializer}. If {@code null} data is provided , it will be converted into empty byte array. * * @param data The {@link Object} which needs to be serialized into bytes. * @param serializer to use for serializing the object. * @throws NullPointerException If {@code serializer} is null. * @return {@link Mono} of {@link BinaryData} representing the binary data. */ public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) { return Mono.fromCallable(() -> fromObject(data, serializer)); } /** * Provides byte array representation of this {@link BinaryData} object. * * @return byte array representation of the the data. */ public byte[] toBytes() { return Arrays.copyOf(this.data, this.data.length); } /** * Provides {@link String} representation of this {@link BinaryData} object. The bytes are converted into * {@link String} using {@link StandardCharsets * * @return {@link String} representation of the data. */ public String toString() { return new String(this.data, StandardCharsets.UTF_8); } /** * Deserialize the bytes into the {@link Object} of given type by applying the provided {@link ObjectSerializer} on * the data. * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); Objects.requireNonNull(serializer, "'serializer' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return serializer.deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the provided * {@link ObjectSerializer} on the {@link BinaryData}. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param serializer to use deserialize data into type. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) { return Mono.fromCallable(() -> toObject(clazz, serializer)); } /** * Deserialize the bytes into the {@link Object} of given type by using json serializer which is available in * classpath. The serializer must implement {@link JsonSerializer} interface. A singleton instance of * {@link JsonSerializer} is kept for this class to use. * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @return The {@link Object} of given type after deserializing the bytes. 
*/ public <T> T toObject(Class<T> clazz) { Objects.requireNonNull(clazz, "'clazz' cannot be null."); TypeReference<T> ref = TypeReference.createInstance(clazz); InputStream jsonStream = new ByteArrayInputStream(this.data); return getDefaultSerializer().deserialize(jsonStream, ref); } /** * Return a {@link Mono} by deserialize the bytes into the {@link Object} of given type after applying the Json * serializer found on classpath. * * <p><strong>Gets the specified object</strong></p> * {@codesnippet com.azure.core.experimental.util.BinaryDocument.to * * @param clazz representing the type of the Object. * @param <T> Generic type that the data is deserialized into. * @throws NullPointerException If {@code clazz} or {@code serializer} is null. * @return The {@link Object} of given type after deserializing the bytes. */ public <T> Mono<T> toObjectAsync(Class<T> clazz) { return Mono.fromCallable(() -> toObject(clazz)); } private static JsonSerializer getDefaultSerializer() { if (defaultJsonSerializer == null) { defaultJsonSerializer = JsonSerializerProviders.createInstance(); } return defaultJsonSerializer; } }
/**
 * An immutable container for binary data with convenience factories ({@code from*}) and
 * converters ({@code to*}) between streams, byte arrays, strings, reactive byte-buffer
 * streams, and serialized objects.
 *
 * <p>Thread-safety: instances are immutable; the shared default {@link JsonSerializer}
 * is lazily created with double-checked locking.</p>
 */
class BinaryData {
    private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
    private static final BinaryData EMPTY_DATA = new BinaryData(new byte[0]);
    private static final Object LOCK = new Object();

    private final byte[] data;

    // Lazily instantiated shared serializer; 'volatile' is required for the
    // double-checked locking in getDefaultSerializer() to be safe.
    private static volatile JsonSerializer defaultJsonSerializer;

    /**
     * Create an instance of {@link BinaryData} from the given data.
     *
     * @param data to represent as bytes.
     */
    BinaryData(byte[] data) {
        this.data = data;
    }

    /**
     * Creates a {@link BinaryData} instance with the given {@link InputStream} as source of data.
     * The {@link InputStream} is not closed by this function. If the stream is {@code null}, an
     * empty {@link BinaryData} is returned.
     *
     * @param inputStream to read bytes from.
     * @throws UncheckedIOException If any error occurs while reading from {@code inputStream}.
     * @return {@link BinaryData} representing the binary data.
     */
    public static BinaryData fromStream(InputStream inputStream) {
        if (Objects.isNull(inputStream)) {
            return EMPTY_DATA;
        }

        final int bufferSize = 1024;
        try {
            ByteArrayOutputStream dataOutputBuffer = new ByteArrayOutputStream();
            int nRead;
            byte[] data = new byte[bufferSize];
            while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
                dataOutputBuffer.write(data, 0, nRead);
            }
            dataOutputBuffer.flush();
            return new BinaryData(dataOutputBuffer.toByteArray());
        } catch (IOException ex) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
        }
    }

    /**
     * Asynchronously creates a {@link BinaryData} instance with the given {@link InputStream} as
     * source of data. The {@link InputStream} is not closed by this function. If the stream is
     * {@code null}, an empty {@link BinaryData} is returned.
     *
     * @param inputStream to read bytes from.
     * @return {@link Mono} of {@link BinaryData} representing the binary data.
     */
    public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
        // Defer the blocking read until subscription time.
        return Mono.fromCallable(() -> fromStream(inputStream));
    }

    /**
     * Creates a {@link BinaryData} instance with the given {@link Flux} of {@link ByteBuffer} as
     * source of data. All bytes are collected into a single {@link BinaryData}. If the
     * {@link Flux} is {@code null}, an empty {@link BinaryData} is returned.
     *
     * @param data to use.
     * @return {@link Mono} of {@link BinaryData} representing binary data.
     */
    public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
        if (Objects.isNull(data)) {
            return Mono.just(EMPTY_DATA);
        }
        // FIX (idiom): 'flatMap(bytes -> Mono.just(new BinaryData(bytes)))' replaced with the
        // equivalent 'map' -- no inner Mono is needed for a pure value transformation.
        return FluxUtil.collectBytesInByteBufferStream(data)
            .map(BinaryData::new);
    }

    /**
     * Creates a {@link BinaryData} instance with the given data. The {@link String} is converted
     * into bytes using the UTF-8 character set. If the String is {@code null}, an empty
     * {@link BinaryData} is returned.
     *
     * <p>NOTE(review): this method's javadoc existed in the original source with no
     * implementation attached; the implementation is restored here to match that contract.</p>
     *
     * @param data to use.
     * @return {@link BinaryData} representing binary data.
     */
    public static BinaryData fromString(String data) {
        if (Objects.isNull(data) || data.length() == 0) {
            return EMPTY_DATA;
        }
        return new BinaryData(data.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Creates a {@link BinaryData} instance with the given byte array data. If the byte array is
     * {@code null} or empty, an empty {@link BinaryData} is returned.
     *
     * @param data to use.
     * @return {@link BinaryData} representing the binary data.
     */
    public static BinaryData fromBytes(byte[] data) {
        if (Objects.isNull(data) || data.length == 0) {
            return EMPTY_DATA;
        }
        // Defensive copy so later mutation of the caller's array cannot leak in.
        return new BinaryData(Arrays.copyOf(data, data.length));
    }

    /**
     * Serializes the given {@link Object} into {@link BinaryData} using the JSON serializer found
     * on the classpath. If the given Object is {@code null}, an empty {@link BinaryData} is
     * returned.
     *
     * @param data The {@link Object} which needs to be serialized into bytes.
     * @throws IllegalStateException If a {@link JsonSerializer} cannot be found on the classpath.
     * @return {@link BinaryData} representing the JSON serialized object.
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data) {
        if (Objects.isNull(data)) {
            return EMPTY_DATA;
        }
        final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        getDefaultSerializer().serialize(outputStream, data);
        return new BinaryData(outputStream.toByteArray());
    }

    /**
     * Serializes the given {@link Object} into {@link BinaryData} using the provided
     * {@link ObjectSerializer}. If the Object is {@code null}, an empty {@link BinaryData} is
     * returned.
     *
     * @param data The {@link Object} which needs to be serialized into bytes.
     * @param serializer to use for serializing the object.
     * @throws NullPointerException If {@code serializer} is null.
     * @return {@link BinaryData} representing binary data.
     * @see ObjectSerializer
     * @see JsonSerializer
     */
    public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
        if (Objects.isNull(data)) {
            return EMPTY_DATA;
        }
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");
        final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        serializer.serialize(outputStream, data);
        return new BinaryData(outputStream.toByteArray());
    }

    /**
     * Serializes the given {@link Object} into a {@link Mono} of {@link BinaryData} using the
     * provided {@link ObjectSerializer}. If the Object is {@code null}, an empty
     * {@link BinaryData} is returned.
     *
     * @param data The {@link Object} which needs to be serialized into bytes.
     * @param serializer to use for serializing the object.
     * @throws NullPointerException If {@code serializer} is null.
     * @return {@link Mono} of {@link BinaryData} representing the binary data.
     * @see ObjectSerializer
     */
    public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
        if (Objects.isNull(serializer)) {
            // Surface the argument error through the reactive pipeline instead of throwing.
            return monoError(LOGGER, new NullPointerException("'serializer' cannot be null."));
        }
        return Mono.fromCallable(() -> fromObject(data, serializer));
    }

    /**
     * Provides a byte array representation of this {@link BinaryData} object.
     *
     * @return byte array representation of the data.
     */
    public byte[] toBytes() {
        // Defensive copy to preserve immutability of the wrapped data.
        return Arrays.copyOf(this.data, this.data.length);
    }

    /**
     * Provides a {@link String} representation of this {@link BinaryData} object. The bytes are
     * converted into a {@link String} using the UTF-8 character set.
     *
     * @return {@link String} representation of the data.
     */
    @Override
    public String toString() {
        return new String(this.data, StandardCharsets.UTF_8);
    }

    /**
     * Deserializes the bytes into an {@link Object} of the given type by applying the provided
     * {@link ObjectSerializer} to the data.
     *
     * @param clazz representing the type of the Object.
     * @param serializer to use to deserialize data into the type.
     * @param <T> Generic type that the data is deserialized into.
     * @throws NullPointerException If {@code serializer} or {@code clazz} is null.
     * @return The {@link Object} of given type after deserializing the bytes.
     */
    public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
        Objects.requireNonNull(clazz, "'clazz' cannot be null.");
        Objects.requireNonNull(serializer, "'serializer' cannot be null.");

        TypeReference<T> ref = TypeReference.createInstance(clazz);
        InputStream jsonStream = new ByteArrayInputStream(this.data);
        return serializer.deserialize(jsonStream, ref);
    }

    /**
     * Returns a {@link Mono} that deserializes the bytes into an {@link Object} of the given type
     * by applying the provided {@link ObjectSerializer} to this {@link BinaryData}.
     *
     * @param clazz representing the type of the Object.
     * @param serializer to use to deserialize data into the type.
     * @param <T> Generic type that the data is deserialized into.
     * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
     * @return The {@link Object} of given type after deserializing the bytes.
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
        if (Objects.isNull(clazz)) {
            return monoError(LOGGER, new NullPointerException("'clazz' cannot be null."));
        } else if (Objects.isNull(serializer)) {
            return monoError(LOGGER, new NullPointerException("'serializer' cannot be null."));
        }
        return Mono.fromCallable(() -> toObject(clazz, serializer));
    }

    /**
     * Deserializes the bytes into an {@link Object} of the given type using the JSON serializer
     * available on the classpath. A singleton {@link JsonSerializer} instance is kept for this
     * class to use.
     *
     * @param clazz representing the type of the Object.
     * @param <T> Generic type that the data is deserialized into.
     * @throws NullPointerException If {@code clazz} is null.
     * @return The {@link Object} of given type after deserializing the bytes.
     */
    public <T> T toObject(Class<T> clazz) {
        Objects.requireNonNull(clazz, "'clazz' cannot be null.");

        TypeReference<T> ref = TypeReference.createInstance(clazz);
        InputStream jsonStream = new ByteArrayInputStream(this.data);
        return getDefaultSerializer().deserialize(jsonStream, ref);
    }

    /**
     * Returns a {@link Mono} that deserializes the bytes into an {@link Object} of the given type
     * using the JSON serializer found on the classpath.
     *
     * @param clazz representing the type of the Object.
     * @param <T> Generic type that the data is deserialized into.
     * @throws NullPointerException If {@code clazz} is null.
     * @return The {@link Object} of given type after deserializing the bytes.
     */
    public <T> Mono<T> toObjectAsync(Class<T> clazz) {
        if (Objects.isNull(clazz)) {
            return monoError(LOGGER, new NullPointerException("'clazz' cannot be null."));
        }
        return Mono.fromCallable(() -> toObject(clazz));
    }

    /**
     * Provides an {@link InputStream} for the data represented by this {@link BinaryData} object.
     *
     * @return {@link InputStream} representing the binary data.
     */
    public InputStream toStream() {
        return new ByteArrayInputStream(this.data);
    }

    /*
     * Lazy instantiation to avoid a hard dependency on a JSON serializer implementation.
     * Double-checked locking over the volatile field keeps this thread-safe.
     */
    private static JsonSerializer getDefaultSerializer() {
        if (defaultJsonSerializer == null) {
            synchronized (LOCK) {
                if (defaultJsonSerializer == null) {
                    defaultJsonSerializer = JsonSerializerProviders.createInstance();
                }
            }
        }
        return defaultJsonSerializer;
    }
}
Could you please explain the scenario in which this bug shows itself? Why do we need this change?
/**
 * Serializes this request into the given {@link ByteBuf}: a little-endian length prefix,
 * the request frame, the headers, and -- when present -- a little-endian payload length
 * followed by the payload bytes.
 *
 * @param out the buffer to write into.
 * @throws IllegalStateException if the number of bytes written for the frame and headers
 *     does not match the computed expected length.
 */
void encode(final ByteBuf out) {

    // Length of the length prefix + frame + headers (payload excluded).
    final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int start = out.writerIndex();

    out.writeIntLE(expectedLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Snapshot the writer index once so the check and the error message always agree.
    // FIX: the message previously reported 'out.writerIndex() - expectedLength', which is
    // not the observed length; the correct expression is 'out.writerIndex() - start'.
    final int observedLength = out.writerIndex() - start;

    checkState(observedLength == expectedLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        expectedLength,
        observedLength);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
final int start = out.writerIndex();
/**
 * Writes this request to {@code out} as a little-endian length prefix, the frame, the
 * headers, and (if any) a little-endian payload length followed by the payload bytes.
 *
 * @param out destination buffer.
 * @throws IllegalStateException when the bytes written for frame and headers disagree
 *     with the computed length.
 */
void encode(final ByteBuf out) {

    final int headerSectionLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int writeStart = out.writerIndex();

    out.writeIntLE(headerSectionLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Single snapshot of the bytes written, used for both the check and its message.
    final int bytesWritten = out.writerIndex() - writeStart;
    checkState(bytesWritten == headerSectionLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        headerSectionLength,
        bytesWritten);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
This change was originally found and fixed in PR https://github.com/Azure/azure-sdk-for-java/pull/14697/files#diff-cdd54a91cbc899239eec06998abac39eL81. We have not come across any issue so far because the writerIndex and readerIndex happen to be the same here, but the writerIndex is the correct value to use. Relying on that coincidence also ties us to the underlying ByteBuf implementation details, so it is safer to switch to the right value.
/**
 * Serializes this request into the given {@link ByteBuf}: a little-endian length prefix,
 * the request frame, the headers, and -- when present -- a little-endian payload length
 * followed by the payload bytes.
 *
 * @param out the buffer to write into.
 * @throws IllegalStateException if the number of bytes written for the frame and headers
 *     does not match the computed expected length.
 */
void encode(final ByteBuf out) {

    // Length of the length prefix + frame + headers (payload excluded).
    final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int start = out.writerIndex();

    out.writeIntLE(expectedLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Snapshot the writer index once so the check and the error message always agree.
    // FIX: the message previously reported 'out.writerIndex() - expectedLength', which is
    // not the observed length; the correct expression is 'out.writerIndex() - start'.
    final int observedLength = out.writerIndex() - start;

    checkState(observedLength == expectedLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        expectedLength,
        observedLength);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
final int start = out.writerIndex();
/**
 * Writes this request to {@code out} as a little-endian length prefix, the frame, the
 * headers, and (if any) a little-endian payload length followed by the payload bytes.
 *
 * @param out destination buffer.
 * @throws IllegalStateException when the bytes written for frame and headers disagree
 *     with the computed length.
 */
void encode(final ByteBuf out) {

    final int headerSectionLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int writeStart = out.writerIndex();

    out.writeIntLE(headerSectionLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Single snapshot of the bytes written, used for both the check and its message.
    final int bytesWritten = out.writerIndex() - writeStart;
    checkState(bytesWritten == headerSectionLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        headerSectionLength,
        bytesWritten);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
Use `out.writerIndex() - start` instead of `out.writerIndex() - expectedLength`; the current expression reports an incorrect observed length in the error message.
/**
 * Serializes this request into the given {@link ByteBuf}: a little-endian length prefix,
 * the request frame, the headers, and -- when present -- a little-endian payload length
 * followed by the payload bytes.
 *
 * @param out the buffer to write into.
 * @throws IllegalStateException if the number of bytes written for the frame and headers
 *     does not match the computed expected length.
 */
void encode(final ByteBuf out) {

    // Length of the length prefix + frame + headers (payload excluded).
    final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int start = out.writerIndex();

    out.writeIntLE(expectedLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Snapshot the writer index once so the check and the error message always agree.
    // FIX: the message previously reported 'out.writerIndex() - expectedLength', which is
    // not the observed length; the correct expression is 'out.writerIndex() - start'.
    final int observedLength = out.writerIndex() - start;

    checkState(observedLength == expectedLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        expectedLength,
        observedLength);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
out.writerIndex() - expectedLength);
/**
 * Writes this request to {@code out} as a little-endian length prefix, the frame, the
 * headers, and (if any) a little-endian payload length followed by the payload bytes.
 *
 * @param out destination buffer.
 * @throws IllegalStateException when the bytes written for frame and headers disagree
 *     with the computed length.
 */
void encode(final ByteBuf out) {

    final int headerSectionLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int writeStart = out.writerIndex();

    out.writeIntLE(headerSectionLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Single snapshot of the bytes written, used for both the check and its message.
    final int bytesWritten = out.writerIndex() - writeStart;
    checkState(bytesWritten == headerSectionLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        headerSectionLength,
        bytesWritten);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
// Immutable model of a single RNTBD request: a fixed-size frame, the request
// headers, and an optional payload.
class RntbdRequest {

    private static final byte[] EMPTY_BYTE_ARRAY = {};

    // Fixed-length request frame (activity id, operation type, resource type).
    private final RntbdRequestFrame frame;
    // Variable-length request header section.
    private final RntbdRequestHeaders headers;
    // Request body bytes; never null (shared empty array when there is no payload).
    private final byte[] payload;

    private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
        checkNotNull(frame, "frame");
        checkNotNull(headers, "headers");
        this.frame = frame;
        this.headers = headers;
        // Normalize a null payload so callers never observe null.
        this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
    }

    /** Returns the activity id recorded in the request frame. */
    public UUID getActivityId() {
        return this.frame.getActivityId();
    }

    /**
     * Returns the value of the given request header.
     * The unchecked cast is part of the caller's contract: T must match the
     * header's actual value type.
     */
    @JsonIgnore
    @SuppressWarnings("unchecked")
    public <T> T getHeader(final RntbdRequestHeader header) {
        return (T) this.headers.get(header).getValue();
    }

    /** Returns the transport request id header value. */
    public Long getTransportRequestId() {
        return this.getHeader(RntbdRequestHeader.TransportRequestID);
    }

    /**
     * Decodes an RntbdRequest from the given buffer.
     *
     * @param in buffer positioned at the start of a serialized request.
     * @throws IllegalStateException if the resource operation code is zero, or if
     *     the number of bytes consumed disagrees with the length prefix.
     */
    public static RntbdRequest decode(final ByteBuf in) {
        // Peek (without consuming) at the operation code that follows the 4-byte
        // length prefix; zero marks an invalid request.
        // NOTE(review): this peek uses big-endian getInt while the rest of the
        // format is read little-endian -- confirm this is intentional.
        final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);

        if (resourceOperationCode == 0) {
            final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
            throw new IllegalStateException(reason);
        }

        final int start = in.readerIndex();

        final int expectedLength = in.readIntLE();
        final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
        final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);

        // Whatever remains of the declared length after frame and headers is payload.
        final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));

        final int observedLength = in.readerIndex() - start;

        if (observedLength != expectedLength) {
            final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
            throw new IllegalStateException(reason);
        }

        final byte[] payload = new byte[payloadBuf.readableBytes()];
        payloadBuf.readBytes(payload);
        in.discardReadBytes();

        return new RntbdRequest(header, metadata, payload);
    }

    /** Builds an RntbdRequest from the given request arguments. */
    public static RntbdRequest from(final RntbdRequestArgs args) {
        final RxDocumentServiceRequest serviceRequest = args.serviceRequest();

        final RntbdRequestFrame frame = new RntbdRequestFrame(
            args.activityId(),
            serviceRequest.getOperationType(),
            serviceRequest.getResourceType());

        final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);

        return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
    }
}
Also, I would prefer taking a snapshot of `out.writerIndex()` and using it for both the checkState condition and the error message, so that the two always agree and any thread-safety concerns are addressed.
/**
 * Serializes this request into the given {@link ByteBuf}: a little-endian length prefix,
 * the request frame, the headers, and -- when present -- a little-endian payload length
 * followed by the payload bytes.
 *
 * @param out the buffer to write into.
 * @throws IllegalStateException if the number of bytes written for the frame and headers
 *     does not match the computed expected length.
 */
void encode(final ByteBuf out) {

    // Length of the length prefix + frame + headers (payload excluded).
    final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int start = out.writerIndex();

    out.writeIntLE(expectedLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Snapshot the writer index once so the check and the error message always agree.
    // FIX: the message previously reported 'out.writerIndex() - expectedLength', which is
    // not the observed length; the correct expression is 'out.writerIndex() - start'.
    final int observedLength = out.writerIndex() - start;

    checkState(observedLength == expectedLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        expectedLength,
        observedLength);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
out.writerIndex() - expectedLength);
/**
 * Writes this request to {@code out} as a little-endian length prefix, the frame, the
 * headers, and (if any) a little-endian payload length followed by the payload bytes.
 *
 * @param out destination buffer.
 * @throws IllegalStateException when the bytes written for frame and headers disagree
 *     with the computed length.
 */
void encode(final ByteBuf out) {

    final int headerSectionLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int writeStart = out.writerIndex();

    out.writeIntLE(headerSectionLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Single snapshot of the bytes written, used for both the check and its message.
    final int bytesWritten = out.writerIndex() - writeStart;
    checkState(bytesWritten == headerSectionLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        headerSectionLength,
        bytesWritten);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
Please fix the code style (add spaces around the binary operator): ```suggestion successCount.get() - failureCount.get(), container.getId()); ```
/**
 * Pre-populates every benchmark container with {@code numberOfPreCreatedDocuments}
 * generated documents and records the created documents per container in
 * {@code docsToRead} so later read operations have known targets.
 *
 * @param numberOfPreCreatedDocuments number of documents to create in each container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // Log the full throwable (with stack trace) instead of
                    // concatenating only the message text.
                    logger.error("Error during pre populating item", throwable);
                    // Swallow the failure so one bad create does not abort pre-population.
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and block until all have settled.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // doOnSuccess also fires for the empty Mono emitted on error, hence the subtraction.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
successCount.get()-failureCount.get(), container.getId());
/**
 * Pre-populates every benchmark container with {@code numberOfPreCreatedDocuments}
 * generated documents and records the created documents per container in
 * {@code docsToRead} so later read operations have known targets.
 *
 * @param numberOfPreCreatedDocuments number of documents to create in each container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // BUG FIX: the previous call passed throwable.getMessage() as an
                    // argument with no "{}" placeholder, so SLF4J silently dropped it.
                    // Pass the throwable itself to get the message and stack trace.
                    logger.error("Error during pre populating item", throwable);
                    // Swallow the failure so one bad create does not abort pre-population.
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and block until all have settled.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // doOnSuccess also fires for the empty Mono emitted on error, hence the subtraction.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
Good catch, thanks — updated.
/**
 * Serializes this request into {@code out}: a little-endian total-length prefix,
 * the request frame, the request headers, and — when non-empty — a
 * length-prefixed payload.
 *
 * @param out the buffer to write the encoded request to
 * @throws IllegalStateException if the bytes written do not match the computed length
 */
void encode(final ByteBuf out) {
    // Expected size of the length prefix + frame + headers section.
    final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int start = out.writerIndex();

    out.writeIntLE(expectedLength);
    this.frame.encode(out);
    this.headers.encode(out);

    // Snapshot the writer index once so the condition and the error message agree
    // (the original message reported out.writerIndex() - expectedLength, which is
    // not the observed length, and re-read writerIndex() after the check).
    final int observedLength = out.writerIndex() - start;

    checkState(observedLength == expectedLength,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        expectedLength, observedLength);

    // The payload section is optional and carries its own length prefix.
    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
out.writerIndex() - expectedLength);
/**
 * Writes this request to {@code out} as a little-endian length prefix followed by
 * the frame, the headers, and (only when present) a length-prefixed payload.
 *
 * @param out the destination buffer
 * @throws IllegalStateException if the encoded header section size differs from the computed size
 */
void encode(final ByteBuf out) {
    final int headerBytes = RntbdRequestFrame.LENGTH + this.headers.computeLength();
    final int origin = out.writerIndex();

    out.writeIntLE(headerBytes);
    this.frame.encode(out);
    this.headers.encode(out);

    // Single snapshot of the bytes written so far; used for both the check and the message.
    final int written = out.writerIndex() - origin;

    checkState(written == headerBytes,
        "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
        headerBytes, written);

    if (this.payload.length > 0) {
        out.writeIntLE(this.payload.length);
        out.writeBytes(this.payload);
    }
}
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
I just read the whole code. makes sense to use writerIndex. thanks
void encode(final ByteBuf out) { final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength(); final int start = out.writerIndex(); out.writeIntLE(expectedLength); this.frame.encode(out); this.headers.encode(out); checkState(out.writerIndex() - start == expectedLength, "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}", expectedLength, out.writerIndex() - expectedLength); if (this.payload.length > 0) { out.writeIntLE(this.payload.length); out.writeBytes(this.payload); } }
final int start = out.writerIndex();
void encode(final ByteBuf out) { final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength(); final int start = out.writerIndex(); out.writeIntLE(expectedLength); this.frame.encode(out); this.headers.encode(out); final int observedLength = out.writerIndex() - start; checkState(observedLength == expectedLength, "encoding error: {\"expectedLength\": %s, \"observedLength\": %s}", expectedLength, observedLength); if (this.payload.length > 0) { out.writeIntLE(this.payload.length); out.writeBytes(this.payload); } }
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
class RntbdRequest { private static final byte[] EMPTY_BYTE_ARRAY = {}; private final RntbdRequestFrame frame; private final RntbdRequestHeaders headers; private final byte[] payload; private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) { checkNotNull(frame, "frame"); checkNotNull(headers, "headers"); this.frame = frame; this.headers = headers; this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload; } public UUID getActivityId() { return this.frame.getActivityId(); } @JsonIgnore @SuppressWarnings("unchecked") public <T> T getHeader(final RntbdRequestHeader header) { return (T) this.headers.get(header).getValue(); } public Long getTransportRequestId() { return this.getHeader(RntbdRequestHeader.TransportRequestID); } public static RntbdRequest decode(final ByteBuf in) { final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES); if (resourceOperationCode == 0) { final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode); throw new IllegalStateException(reason); } final int start = in.readerIndex(); final int expectedLength = in.readIntLE(); final RntbdRequestFrame header = RntbdRequestFrame.decode(in); final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in); final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start)); final int observedLength = in.readerIndex() - start; if (observedLength != expectedLength) { final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength); throw new IllegalStateException(reason); } final byte[] payload = new byte[payloadBuf.readableBytes()]; payloadBuf.readBytes(payload); in.discardReadBytes(); return new RntbdRequest(header, metadata, payload); } public static RntbdRequest from(final RntbdRequestArgs args) { final RxDocumentServiceRequest serviceRequest = args.serviceRequest(); final RntbdRequestFrame frame = new RntbdRequestFrame( 
args.activityId(), serviceRequest.getOperationType(), serviceRequest.getResourceType()); final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame); return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray()); } }
reported as service issue #_53
protected void assertListIncidentsForAlertOutput(Incident incident) { Assertions.assertNotNull(incident); Assertions.assertNotNull(incident.getId()); Assertions.assertNotNull(incident.getMetricId()); Assertions.assertNotNull(incident.getStatus()); Assertions.assertNotNull(incident.getLastTime()); Assertions.assertNotNull(incident.getDetectionConfigurationId()); Assertions.assertNotNull(incident.getRootDimensionKey()); Assertions.assertFalse(incident.getRootDimensionKey().asMap().isEmpty()); }
protected void assertListIncidentsForAlertOutput(Incident incident) { Assertions.assertNotNull(incident); Assertions.assertNotNull(incident.getId()); Assertions.assertNotNull(incident.getMetricId()); Assertions.assertNotNull(incident.getStatus()); Assertions.assertNotNull(incident.getLastTime()); Assertions.assertNotNull(incident.getDetectionConfigurationId()); Assertions.assertNotNull(incident.getRootDimensionKey()); Assertions.assertFalse(incident.getRootDimensionKey().asMap().isEmpty()); }
class ListIncidentsForAlertOutput { static final ListIncidentsForAlertOutput INSTANCE = new ListIncidentsForAlertOutput(); final int expectedIncidents = 2; }
class ListIncidentsForAlertOutput { static final ListIncidentsForAlertOutput INSTANCE = new ListIncidentsForAlertOutput(); final int expectedIncidents = 2; }
In `finally` should we attempt to clean up the resource only if the `alertConfigurationId` has a value (which means creation succeeded).
public void createAnomalyAlertConfiguration(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> StepVerifier.create(client.createAnomalyAlertConfiguration(inputAnomalyAlert)) .assertNext(createdAnomalyAlert -> { alertConfigurationId.set(createdAnomalyAlert.getId()); validateAnomalyAlertResult(inputAnomalyAlert, createdAnomalyAlert); }) .verifyComplete()); } finally { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } }
StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete();
public void createAnomalyAlertConfiguration(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> StepVerifier.create(client.createAnomalyAlertConfiguration(inputAnomalyAlert)) .assertNext(createdAnomalyAlert -> { alertConfigurationId.set(createdAnomalyAlert.getId()); validateAnomalyAlertResult(inputAnomalyAlert, createdAnomalyAlert); }) .verifyComplete()); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigurationId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } }
class AnomalyAlertAsyncTest extends AnomalyAlertTestBase { private MetricsAdvisorAdministrationAsyncClient client; @BeforeAll static void beforeAll() { TestBase.setupClass(); StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } /** * Verifies the result of the list anomaly alert configuration method when no options specified. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils void testListAnomalyAlert(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); listAnomalyAlertRunner(inputAnomalyAlertList -> { List<AnomalyAlertConfiguration> actualAnomalyAlertList = new ArrayList<>(); List<AnomalyAlertConfiguration> expectedAnomalyAlertList = inputAnomalyAlertList.stream().map(inputAnomalyAlert -> client.createAnomalyAlertConfiguration(inputAnomalyAlert).block()) .collect(Collectors.toList()); final AtomicInteger i = new AtomicInteger(-1); StepVerifier.create(client.listAnomalyAlertConfigurations(inputAnomalyAlertList.get(i.incrementAndGet()) .getMetricAlertConfigurations().get(i.get()).getDetectionConfigurationId())) .thenConsumeWhile(actualAnomalyAlertList::add) .verifyComplete(); final List<String> expectedAnomalyAlertIdList = expectedAnomalyAlertList.stream() .map(AnomalyAlertConfiguration::getId) .collect(Collectors.toList()); final List<AnomalyAlertConfiguration> actualList = actualAnomalyAlertList.stream().filter(actualConfiguration -> expectedAnomalyAlertIdList .contains(actualConfiguration.getId())) .collect(Collectors.toList()); assertEquals(inputAnomalyAlertList.size(), actualList.size()); expectedAnomalyAlertList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); actualList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); expectedAnomalyAlertList.forEach(expectedAnomalyAlert -> 
validateAnomalyAlertResult(expectedAnomalyAlert, actualList.get(i.get()))); expectedAnomalyAlertIdList.forEach(inputConfigId -> StepVerifier.create(client.deleteAnomalyAlertConfiguration(inputConfigId)).verifyComplete()); }); } /** * Verifies that an exception is thrown for null detection configuration Id parameter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertNullId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(null)) .expectErrorMatches(throwable -> throwable instanceof NullPointerException && throwable.getMessage().equals("'alertConfigurationId' is required.")) .verify(); } /** * Verifies that an exception is thrown for invalid detection configuration Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertInvalidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(INCORRECT_UUID)) .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException && throwable.getMessage().equals(INCORRECT_UUID_ERROR)) .verify(); } /** * Verifies a valid alert configuration info is returned with response for a valid alert configuration Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertValidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); creatAnomalyAlertRunner(inputAnomalyAlertConfiguration -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfiguration).block(); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(alertConfigurationId.get())) .assertNext(anomalyAlertConfigurationResponse -> { assertEquals(anomalyAlertConfigurationResponse.getStatusCode(), HttpResponseStatus.OK.code()); validateAnomalyAlertResult(createdAnomalyAlert, anomalyAlertConfigurationResponse.getValue()); }); }); Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } /** * Verifies valid anomaly alert configuration created for required anomaly alert configuration details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils /** * Verifies happy path for delete anomaly alert configuration. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void deleteAnomalyAlertWithResponse(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlertConfig -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfig).block(); StepVerifier.create(client.deleteAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .assertNext(response -> assertEquals(HttpResponseStatus.NO_CONTENT.code(), response.getStatusCode())) .verifyComplete(); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .verifyErrorSatisfies(throwable -> { assertEquals(ErrorCodeException.class, throwable.getClass()); final ErrorCodeException errorCodeException = (ErrorCodeException) throwable; assertEquals(HttpResponseStatus.NOT_FOUND.code(), errorCodeException.getResponse().getStatusCode()); }); }); } /** * Verifies previously created anomaly alert configuration can be updated successfully to update the metrics * operator. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertHappyPath(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> inputAnomalyAlertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); inputAnomalyAlertConfigId.set(createdAnomalyAlert.getId()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration = new MetricAnomalyAlertConfiguration(DETECTION_CONFIGURATION_ID, MetricAnomalyAlertScope.forWholeSeries()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration2 = new MetricAnomalyAlertConfiguration("e17f32d4-3ddf-4dc7-84ee-b4130c7e1777", MetricAnomalyAlertScope.forWholeSeries()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations( Arrays.asList(metricAnomalyAlertConfiguration, metricAnomalyAlertConfiguration2)) .setCrossMetricsOperator(MetricAnomalyAlertConfigurationsOperator.XOR))) .assertNext(updatedAnomalyAlert -> { validateAnomalyAlertResult(inputAnomalyAlert .addMetricAlertConfiguration(metricAnomalyAlertConfiguration2), updatedAnomalyAlert); assertEquals(MetricAnomalyAlertConfigurationsOperator.XOR.toString(), updatedAnomalyAlert.getCrossMetricsOperator().toString()); }).verifyComplete(); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations(null))) .verifyErrorSatisfies(throwable -> assertEquals( "'alertConfiguration.metricAnomalyAlertConfigurations' is required and cannot be empty", throwable.getMessage())); }); } finally { Mono<Void> deleteAnomalyAlertConfiguration = 
client.deleteAnomalyAlertConfiguration(inputAnomalyAlertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } /** * Verifies update for a removing hooks from a previously created anomaly alert configuration's. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertRemoveHooks(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.removeHookToAlert(ALERT_HOOK_ID))) .assertNext(updatedAnomalyAlert -> assertEquals(0, updatedAnomalyAlert.getIdOfHooksToAlert().size())) .verifyComplete(); }); } finally { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } }
class AnomalyAlertAsyncTest extends AnomalyAlertTestBase { private MetricsAdvisorAdministrationAsyncClient client; @BeforeAll static void beforeAll() { TestBase.setupClass(); StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } /** * Verifies the result of the list anomaly alert configuration method when no options specified. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils void testListAnomalyAlert(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<List<String>> expectedAnomalyAlertIdList = new AtomicReference<List<String>>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); listAnomalyAlertRunner(inputAnomalyAlertList -> { List<AnomalyAlertConfiguration> actualAnomalyAlertList = new ArrayList<>(); List<AnomalyAlertConfiguration> expectedAnomalyAlertList = inputAnomalyAlertList.stream().map(inputAnomalyAlert -> client.createAnomalyAlertConfiguration(inputAnomalyAlert).block()) .collect(Collectors.toList()); final AtomicInteger i = new AtomicInteger(-1); StepVerifier.create(client.listAnomalyAlertConfigurations(inputAnomalyAlertList.get(i.incrementAndGet()) .getMetricAlertConfigurations().get(i.get()).getDetectionConfigurationId())) .thenConsumeWhile(actualAnomalyAlertList::add) .verifyComplete(); expectedAnomalyAlertIdList.set(expectedAnomalyAlertList.stream() .map(AnomalyAlertConfiguration::getId) .collect(Collectors.toList())); final List<AnomalyAlertConfiguration> actualList = actualAnomalyAlertList.stream().filter(actualConfiguration -> expectedAnomalyAlertIdList.get() .contains(actualConfiguration.getId())) .collect(Collectors.toList()); assertEquals(inputAnomalyAlertList.size(), actualList.size()); expectedAnomalyAlertList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); 
actualList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); expectedAnomalyAlertList.forEach(expectedAnomalyAlert -> validateAnomalyAlertResult(expectedAnomalyAlert, actualList.get(i.get()))); }); } finally { if (!CoreUtils.isNullOrEmpty(expectedAnomalyAlertIdList.get())) { expectedAnomalyAlertIdList.get().forEach(inputConfigId -> StepVerifier.create(client.deleteAnomalyAlertConfiguration(inputConfigId)).verifyComplete()); } } } /** * Verifies that an exception is thrown for null detection configuration Id parameter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertNullId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(null)) .expectErrorMatches(throwable -> throwable instanceof NullPointerException && throwable.getMessage().equals("'alertConfigurationId' is required.")) .verify(); } /** * Verifies that an exception is thrown for invalid detection configuration Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertInvalidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(INCORRECT_UUID)) .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException && throwable.getMessage().equals(INCORRECT_UUID_ERROR)) .verify(); } /** * Verifies a valid alert configuration info is returned with response for a valid alert configuration Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertValidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); creatAnomalyAlertRunner(inputAnomalyAlertConfiguration -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfiguration).block(); assertNotNull(createdAnomalyAlert); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(alertConfigurationId.get())) .assertNext(anomalyAlertConfigurationResponse -> { assertEquals(anomalyAlertConfigurationResponse.getStatusCode(), HttpResponseStatus.OK.code()); validateAnomalyAlertResult(createdAnomalyAlert, anomalyAlertConfigurationResponse.getValue()); }); }); Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } /** * Verifies valid anomaly alert configuration created for required anomaly alert configuration details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils /** * Verifies happy path for delete anomaly alert configuration. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void deleteAnomalyAlertWithResponse(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlertConfig -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfig).block(); assertNotNull(createdAnomalyAlert); StepVerifier.create(client.deleteAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .assertNext(response -> assertEquals(HttpResponseStatus.NO_CONTENT.code(), response.getStatusCode())) .verifyComplete(); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .verifyErrorSatisfies(throwable -> { assertEquals(ErrorCodeException.class, throwable.getClass()); final ErrorCodeException errorCodeException = (ErrorCodeException) throwable; assertEquals(HttpResponseStatus.NOT_FOUND.code(), errorCodeException.getResponse().getStatusCode()); }); }); } /** * Verifies previously created anomaly alert configuration can be updated successfully to update the metrics * operator. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertHappyPath(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); assertNotNull(createdAnomalyAlert); alertConfigId.set(createdAnomalyAlert.getId()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration = new MetricAnomalyAlertConfiguration(DETECTION_CONFIGURATION_ID, MetricAnomalyAlertScope.forWholeSeries()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration2 = new MetricAnomalyAlertConfiguration("e17f32d4-3ddf-4dc7-84ee-b4130c7e1777", MetricAnomalyAlertScope.forWholeSeries()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations( Arrays.asList(metricAnomalyAlertConfiguration, metricAnomalyAlertConfiguration2)) .setCrossMetricsOperator(MetricAnomalyAlertConfigurationsOperator.XOR))) .assertNext(updatedAnomalyAlert -> { validateAnomalyAlertResult(inputAnomalyAlert .addMetricAlertConfiguration(metricAnomalyAlertConfiguration2), updatedAnomalyAlert); assertEquals(MetricAnomalyAlertConfigurationsOperator.XOR.toString(), updatedAnomalyAlert.getCrossMetricsOperator().toString()); }).verifyComplete(); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations(null))) .verifyErrorSatisfies(throwable -> assertEquals( "'alertConfiguration.metricAnomalyAlertConfigurations' is required and cannot be empty", throwable.getMessage())); }); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = 
client.deleteAnomalyAlertConfiguration(alertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } } /** * Verifies update for a removing hooks from a previously created anomaly alert configuration's. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertRemoveHooks(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); assertNotNull(createdAnomalyAlert); alertConfigId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.removeHookToAlert(ALERT_HOOK_ID))) .assertNext(updatedAnomalyAlert -> assertEquals(0, updatedAnomalyAlert.getIdOfHooksToAlert().size())) .verifyComplete(); }); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } } }
the same comment for other applicable places we do the cleanup in finally.
public void createAnomalyAlertConfiguration(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> StepVerifier.create(client.createAnomalyAlertConfiguration(inputAnomalyAlert)) .assertNext(createdAnomalyAlert -> { alertConfigurationId.set(createdAnomalyAlert.getId()); validateAnomalyAlertResult(inputAnomalyAlert, createdAnomalyAlert); }) .verifyComplete()); } finally { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } }
StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete();
public void createAnomalyAlertConfiguration(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> StepVerifier.create(client.createAnomalyAlertConfiguration(inputAnomalyAlert)) .assertNext(createdAnomalyAlert -> { alertConfigurationId.set(createdAnomalyAlert.getId()); validateAnomalyAlertResult(inputAnomalyAlert, createdAnomalyAlert); }) .verifyComplete()); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigurationId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } }
class AnomalyAlertAsyncTest extends AnomalyAlertTestBase { private MetricsAdvisorAdministrationAsyncClient client; @BeforeAll static void beforeAll() { TestBase.setupClass(); StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } /** * Verifies the result of the list anomaly alert configuration method when no options specified. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils void testListAnomalyAlert(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); listAnomalyAlertRunner(inputAnomalyAlertList -> { List<AnomalyAlertConfiguration> actualAnomalyAlertList = new ArrayList<>(); List<AnomalyAlertConfiguration> expectedAnomalyAlertList = inputAnomalyAlertList.stream().map(inputAnomalyAlert -> client.createAnomalyAlertConfiguration(inputAnomalyAlert).block()) .collect(Collectors.toList()); final AtomicInteger i = new AtomicInteger(-1); StepVerifier.create(client.listAnomalyAlertConfigurations(inputAnomalyAlertList.get(i.incrementAndGet()) .getMetricAlertConfigurations().get(i.get()).getDetectionConfigurationId())) .thenConsumeWhile(actualAnomalyAlertList::add) .verifyComplete(); final List<String> expectedAnomalyAlertIdList = expectedAnomalyAlertList.stream() .map(AnomalyAlertConfiguration::getId) .collect(Collectors.toList()); final List<AnomalyAlertConfiguration> actualList = actualAnomalyAlertList.stream().filter(actualConfiguration -> expectedAnomalyAlertIdList .contains(actualConfiguration.getId())) .collect(Collectors.toList()); assertEquals(inputAnomalyAlertList.size(), actualList.size()); expectedAnomalyAlertList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); actualList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); expectedAnomalyAlertList.forEach(expectedAnomalyAlert -> 
validateAnomalyAlertResult(expectedAnomalyAlert, actualList.get(i.get()))); expectedAnomalyAlertIdList.forEach(inputConfigId -> StepVerifier.create(client.deleteAnomalyAlertConfiguration(inputConfigId)).verifyComplete()); }); } /** * Verifies that an exception is thrown for null detection configuration Id parameter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertNullId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(null)) .expectErrorMatches(throwable -> throwable instanceof NullPointerException && throwable.getMessage().equals("'alertConfigurationId' is required.")) .verify(); } /** * Verifies that an exception is thrown for invalid detection configuration Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertInvalidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(INCORRECT_UUID)) .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException && throwable.getMessage().equals(INCORRECT_UUID_ERROR)) .verify(); } /** * Verifies a valid alert configuration info is returned with response for a valid alert configuration Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertValidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); creatAnomalyAlertRunner(inputAnomalyAlertConfiguration -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfiguration).block(); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(alertConfigurationId.get())) .assertNext(anomalyAlertConfigurationResponse -> { assertEquals(anomalyAlertConfigurationResponse.getStatusCode(), HttpResponseStatus.OK.code()); validateAnomalyAlertResult(createdAnomalyAlert, anomalyAlertConfigurationResponse.getValue()); }); }); Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } /** * Verifies valid anomaly alert configuration created for required anomaly alert configuration details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils /** * Verifies happy path for delete anomaly alert configuration. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void deleteAnomalyAlertWithResponse(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlertConfig -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfig).block(); StepVerifier.create(client.deleteAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .assertNext(response -> assertEquals(HttpResponseStatus.NO_CONTENT.code(), response.getStatusCode())) .verifyComplete(); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .verifyErrorSatisfies(throwable -> { assertEquals(ErrorCodeException.class, throwable.getClass()); final ErrorCodeException errorCodeException = (ErrorCodeException) throwable; assertEquals(HttpResponseStatus.NOT_FOUND.code(), errorCodeException.getResponse().getStatusCode()); }); }); } /** * Verifies previously created anomaly alert configuration can be updated successfully to update the metrics * operator. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertHappyPath(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> inputAnomalyAlertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); inputAnomalyAlertConfigId.set(createdAnomalyAlert.getId()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration = new MetricAnomalyAlertConfiguration(DETECTION_CONFIGURATION_ID, MetricAnomalyAlertScope.forWholeSeries()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration2 = new MetricAnomalyAlertConfiguration("e17f32d4-3ddf-4dc7-84ee-b4130c7e1777", MetricAnomalyAlertScope.forWholeSeries()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations( Arrays.asList(metricAnomalyAlertConfiguration, metricAnomalyAlertConfiguration2)) .setCrossMetricsOperator(MetricAnomalyAlertConfigurationsOperator.XOR))) .assertNext(updatedAnomalyAlert -> { validateAnomalyAlertResult(inputAnomalyAlert .addMetricAlertConfiguration(metricAnomalyAlertConfiguration2), updatedAnomalyAlert); assertEquals(MetricAnomalyAlertConfigurationsOperator.XOR.toString(), updatedAnomalyAlert.getCrossMetricsOperator().toString()); }).verifyComplete(); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations(null))) .verifyErrorSatisfies(throwable -> assertEquals( "'alertConfiguration.metricAnomalyAlertConfigurations' is required and cannot be empty", throwable.getMessage())); }); } finally { Mono<Void> deleteAnomalyAlertConfiguration = 
client.deleteAnomalyAlertConfiguration(inputAnomalyAlertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } /** * Verifies update for a removing hooks from a previously created anomaly alert configuration's. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertRemoveHooks(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.removeHookToAlert(ALERT_HOOK_ID))) .assertNext(updatedAnomalyAlert -> assertEquals(0, updatedAnomalyAlert.getIdOfHooksToAlert().size())) .verifyComplete(); }); } finally { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } }
class AnomalyAlertAsyncTest extends AnomalyAlertTestBase { private MetricsAdvisorAdministrationAsyncClient client; @BeforeAll static void beforeAll() { TestBase.setupClass(); StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } /** * Verifies the result of the list anomaly alert configuration method when no options specified. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils void testListAnomalyAlert(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<List<String>> expectedAnomalyAlertIdList = new AtomicReference<List<String>>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); listAnomalyAlertRunner(inputAnomalyAlertList -> { List<AnomalyAlertConfiguration> actualAnomalyAlertList = new ArrayList<>(); List<AnomalyAlertConfiguration> expectedAnomalyAlertList = inputAnomalyAlertList.stream().map(inputAnomalyAlert -> client.createAnomalyAlertConfiguration(inputAnomalyAlert).block()) .collect(Collectors.toList()); final AtomicInteger i = new AtomicInteger(-1); StepVerifier.create(client.listAnomalyAlertConfigurations(inputAnomalyAlertList.get(i.incrementAndGet()) .getMetricAlertConfigurations().get(i.get()).getDetectionConfigurationId())) .thenConsumeWhile(actualAnomalyAlertList::add) .verifyComplete(); expectedAnomalyAlertIdList.set(expectedAnomalyAlertList.stream() .map(AnomalyAlertConfiguration::getId) .collect(Collectors.toList())); final List<AnomalyAlertConfiguration> actualList = actualAnomalyAlertList.stream().filter(actualConfiguration -> expectedAnomalyAlertIdList.get() .contains(actualConfiguration.getId())) .collect(Collectors.toList()); assertEquals(inputAnomalyAlertList.size(), actualList.size()); expectedAnomalyAlertList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); 
actualList.sort(Comparator.comparing(AnomalyAlertConfiguration::getName)); expectedAnomalyAlertList.forEach(expectedAnomalyAlert -> validateAnomalyAlertResult(expectedAnomalyAlert, actualList.get(i.get()))); }); } finally { if (!CoreUtils.isNullOrEmpty(expectedAnomalyAlertIdList.get())) { expectedAnomalyAlertIdList.get().forEach(inputConfigId -> StepVerifier.create(client.deleteAnomalyAlertConfiguration(inputConfigId)).verifyComplete()); } } } /** * Verifies that an exception is thrown for null detection configuration Id parameter. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertNullId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(null)) .expectErrorMatches(throwable -> throwable instanceof NullPointerException && throwable.getMessage().equals("'alertConfigurationId' is required.")) .verify(); } /** * Verifies that an exception is thrown for invalid detection configuration Id. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertInvalidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); StepVerifier.create(client.getAnomalyAlertConfiguration(INCORRECT_UUID)) .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException && throwable.getMessage().equals(INCORRECT_UUID_ERROR)) .verify(); } /** * Verifies a valid alert configuration info is returned with response for a valid alert configuration Id. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void getAnomalyAlertValidId(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); final AtomicReference<String> alertConfigurationId = new AtomicReference<>(); creatAnomalyAlertRunner(inputAnomalyAlertConfiguration -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfiguration).block(); assertNotNull(createdAnomalyAlert); alertConfigurationId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(alertConfigurationId.get())) .assertNext(anomalyAlertConfigurationResponse -> { assertEquals(anomalyAlertConfigurationResponse.getStatusCode(), HttpResponseStatus.OK.code()); validateAnomalyAlertResult(createdAnomalyAlert, anomalyAlertConfigurationResponse.getValue()); }); }); Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigurationId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } /** * Verifies valid anomaly alert configuration created for required anomaly alert configuration details. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils /** * Verifies happy path for delete anomaly alert configuration. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void deleteAnomalyAlertWithResponse(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlertConfig -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlertConfig).block(); assertNotNull(createdAnomalyAlert); StepVerifier.create(client.deleteAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .assertNext(response -> assertEquals(HttpResponseStatus.NO_CONTENT.code(), response.getStatusCode())) .verifyComplete(); StepVerifier.create(client.getAnomalyAlertConfigurationWithResponse(createdAnomalyAlert.getId())) .verifyErrorSatisfies(throwable -> { assertEquals(ErrorCodeException.class, throwable.getClass()); final ErrorCodeException errorCodeException = (ErrorCodeException) throwable; assertEquals(HttpResponseStatus.NOT_FOUND.code(), errorCodeException.getResponse().getStatusCode()); }); }); } /** * Verifies previously created anomaly alert configuration can be updated successfully to update the metrics * operator. 
*/ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertHappyPath(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); assertNotNull(createdAnomalyAlert); alertConfigId.set(createdAnomalyAlert.getId()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration = new MetricAnomalyAlertConfiguration(DETECTION_CONFIGURATION_ID, MetricAnomalyAlertScope.forWholeSeries()); final MetricAnomalyAlertConfiguration metricAnomalyAlertConfiguration2 = new MetricAnomalyAlertConfiguration("e17f32d4-3ddf-4dc7-84ee-b4130c7e1777", MetricAnomalyAlertScope.forWholeSeries()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations( Arrays.asList(metricAnomalyAlertConfiguration, metricAnomalyAlertConfiguration2)) .setCrossMetricsOperator(MetricAnomalyAlertConfigurationsOperator.XOR))) .assertNext(updatedAnomalyAlert -> { validateAnomalyAlertResult(inputAnomalyAlert .addMetricAlertConfiguration(metricAnomalyAlertConfiguration2), updatedAnomalyAlert); assertEquals(MetricAnomalyAlertConfigurationsOperator.XOR.toString(), updatedAnomalyAlert.getCrossMetricsOperator().toString()); }).verifyComplete(); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.setMetricAlertConfigurations(null))) .verifyErrorSatisfies(throwable -> assertEquals( "'alertConfiguration.metricAnomalyAlertConfigurations' is required and cannot be empty", throwable.getMessage())); }); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = 
client.deleteAnomalyAlertConfiguration(alertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } } /** * Verifies update for a removing hooks from a previously created anomaly alert configuration's. */ @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.metricsadvisor.TestUtils public void updateAnomalyAlertRemoveHooks(HttpClient httpClient, MetricsAdvisorServiceVersion serviceVersion) { final AtomicReference<String> alertConfigId = new AtomicReference<>(); try { client = getMetricsAdvisorAdministrationBuilder(httpClient, serviceVersion).buildAsyncClient(); creatAnomalyAlertRunner(inputAnomalyAlert -> { final AnomalyAlertConfiguration createdAnomalyAlert = client.createAnomalyAlertConfiguration(inputAnomalyAlert).block(); assertNotNull(createdAnomalyAlert); alertConfigId.set(createdAnomalyAlert.getId()); StepVerifier.create(client.updateAnomalyAlertConfiguration( createdAnomalyAlert.removeHookToAlert(ALERT_HOOK_ID))) .assertNext(updatedAnomalyAlert -> assertEquals(0, updatedAnomalyAlert.getIdOfHooksToAlert().size())) .verifyComplete(); }); } finally { if (!CoreUtils.isNullOrEmpty(alertConfigId.get())) { Mono<Void> deleteAnomalyAlertConfiguration = client.deleteAnomalyAlertConfiguration(alertConfigId.get()); StepVerifier.create(deleteAnomalyAlertConfiguration).verifyComplete(); } } } }
would need to change `authenticate(httpPipeline, profile)` in https://github.com/Azure/azure-sdk-for-java/pull/16144
public void testManageFunctionAppLogs() throws IOException { if (skipInPlayback()) { return; } azureResourceManager = AzureResourceManager.authenticate( setReadTimeout(azureResourceManager.storageAccounts().manager().httpPipeline(), Duration.ofMinutes(10)), new AzureProfile(azureResourceManager.tenantId(), azureResourceManager.subscriptionId(), AzureEnvironment.AZURE) ) .withDefaultSubscription(); Assertions.assertTrue(ManageFunctionAppLogs.runSample(azureResourceManager)); }
.withDefaultSubscription();
public void testManageFunctionAppLogs() throws IOException { if (skipInPlayback()) { return; } azureResourceManager = AzureResourceManager.authenticate( setReadTimeout(azureResourceManager.storageAccounts().manager().httpPipeline(), Duration.ofMinutes(10)), new AzureProfile(azureResourceManager.tenantId(), azureResourceManager.subscriptionId(), AzureEnvironment.AZURE) ) .withDefaultSubscription(); Assertions.assertTrue(ManageFunctionAppLogs.runSample(azureResourceManager)); }
class AppServiceSampleLiveOnlyTests extends SamplesTestBase { @Test @DoNotRecord public void testManageWebAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppStorageAccountConnection() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppStorageAccountConnection.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppStorageAccountConnection() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppStorageAccountConnection.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithContainerRegistry() throws IOException, InterruptedException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppWithContainerRegistry.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppWithAuthentication() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppWithAuthentication.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppCosmosDbByMsi() throws IOException, InterruptedException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppCosmosDbByMsi.runSample(azureResourceManager, "")); } @Test @DoNotRecord public void testManageWebAppCosmosDbByMsi() { if (skipInPlayback()) { return; } 
Assertions.assertTrue(ManageWebAppCosmosDbByMsi.runSample(azureResourceManager, credentialFromFile(), clientIdFromFile())); } @Test @DoNotRecord public void testManageWebAppCosmosDbThroughKeyVault() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppCosmosDbThroughKeyVault.runSample(azureResourceManager, clientIdFromFile())); } @Test @DoNotRecord @Test @DoNotRecord public void testManageWebAppLogs() throws IOException { if (skipInPlayback()) { return; } azureResourceManager = AzureResourceManager.authenticate( setReadTimeout(azureResourceManager.storageAccounts().manager().httpPipeline(), Duration.ofMinutes(10)), new AzureProfile(azureResourceManager.tenantId(), azureResourceManager.subscriptionId(), AzureEnvironment.AZURE) ) .withDefaultSubscription(); Assertions.assertTrue(ManageWebAppLogs.runSample(azureResourceManager)); } private HttpPipeline setReadTimeout(HttpPipeline httpPipeline, Duration timeout) { HttpPipelineBuilder builder = new HttpPipelineBuilder(); for (int i = 0; i < httpPipeline.getPolicyCount(); ++i) { builder.policies(httpPipeline.getPolicy(i)); } builder.httpClient( super.generateHttpClientWithProxy( new NettyAsyncHttpClientBuilder() .readTimeout(timeout), null ) ); return builder.build(); } @Test @DoNotRecord public void testManageLinuxFunctionAppSourceControl() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxFunctionAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppWithDomainSsl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppWithTrafficManager() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppWithTrafficManager.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } 
Assertions.assertTrue(ManageLinuxWebAppWithDomainSsl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithTrafficManager() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppWithTrafficManager.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppWithDomainSsl.runSample(azureResourceManager)); } }
class AppServiceSampleLiveOnlyTests extends SamplesTestBase { @Test @DoNotRecord public void testManageWebAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppStorageAccountConnection() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppStorageAccountConnection.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppStorageAccountConnection() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppStorageAccountConnection.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithContainerRegistry() throws IOException, InterruptedException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppWithContainerRegistry.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppWithAuthentication() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppWithAuthentication.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppSourceControl() throws GitAPIException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppCosmosDbByMsi() throws IOException, InterruptedException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppCosmosDbByMsi.runSample(azureResourceManager, "")); } @Test @DoNotRecord public void testManageWebAppCosmosDbByMsi() { if (skipInPlayback()) { return; } 
Assertions.assertTrue(ManageWebAppCosmosDbByMsi.runSample(azureResourceManager, credentialFromFile(), clientIdFromFile())); } @Test @DoNotRecord public void testManageWebAppCosmosDbThroughKeyVault() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppCosmosDbThroughKeyVault.runSample(azureResourceManager, clientIdFromFile())); } @Test @DoNotRecord @Test @DoNotRecord public void testManageWebAppLogs() throws IOException { if (skipInPlayback()) { return; } azureResourceManager = AzureResourceManager.authenticate( setReadTimeout(azureResourceManager.storageAccounts().manager().httpPipeline(), Duration.ofMinutes(10)), new AzureProfile(azureResourceManager.tenantId(), azureResourceManager.subscriptionId(), AzureEnvironment.AZURE) ) .withDefaultSubscription(); Assertions.assertTrue(ManageWebAppLogs.runSample(azureResourceManager)); } private HttpPipeline setReadTimeout(HttpPipeline httpPipeline, Duration timeout) { HttpPipelineBuilder builder = new HttpPipelineBuilder(); for (int i = 0; i < httpPipeline.getPolicyCount(); ++i) { builder.policies(httpPipeline.getPolicy(i)); } builder.httpClient( super.generateHttpClientWithProxy( new NettyAsyncHttpClientBuilder() .readTimeout(timeout), null ) ); return builder.build(); } @Test @DoNotRecord public void testManageLinuxFunctionAppSourceControl() { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxFunctionAppSourceControl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppWithDomainSsl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageWebAppWithTrafficManager() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageWebAppWithTrafficManager.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } 
Assertions.assertTrue(ManageLinuxWebAppWithDomainSsl.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageLinuxWebAppWithTrafficManager() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageLinuxWebAppWithTrafficManager.runSample(azureResourceManager)); } @Test @DoNotRecord public void testManageFunctionAppWithDomainSsl() throws IOException { if (skipInPlayback()) { return; } Assertions.assertTrue(ManageFunctionAppWithDomainSsl.runSample(azureResourceManager)); } }
Why vm size changes here?
public static boolean runSample(AzureResourceManager azureResourceManager, String clientId, String secret) throws IOException, JSchException, InterruptedException { final String rgName = Utils.randomResourceName(azureResourceManager, "rgaks", 15); final String acrName = Utils.randomResourceName(azureResourceManager, "acrsample", 20); final String aksName = Utils.randomResourceName(azureResourceManager, "akssample", 30); final String rootUserName = "aksuser"; final Region region = Region.US_EAST; final String dockerImageName = "nginx"; final String dockerImageTag = "latest"; final String dockerContainerName = "acrsample-nginx"; String aksSecretName = "mysecret112233"; String aksNamespace = "acrsample"; String aksLbIngressName = "lb-acrsample"; String servicePrincipalClientId = clientId; String servicePrincipalSecret = secret; try { if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty() || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); servicePrincipalSecret = System.getenv("AZURE_CLIENT_SECRET"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty() || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); servicePrincipalSecret = Utils.getSecondaryServicePrincipalSecret(envSecondaryServicePrincipal); } } System.out.println("Creating an SSH private and public key pair"); SSHShell.SshPublicPrivateKey sshKeys = SSHShell.generateSSHKeys("", "ACS"); System.out.println("SSH private key value: %n" + sshKeys.getSshPrivateKey()); 
System.out.println("SSH public key value: %n" + sshKeys.getSshPublicKey()); System.out.println("Creating an Azure Container Service with managed Kubernetes cluster and one agent pool with one virtual machine"); Date t1 = new Date(); KubernetesCluster kubernetesCluster = azureResourceManager.kubernetesClusters().define(aksName) .withRegion(region) .withNewResourceGroup(rgName) .withDefaultVersion() .withRootUsername(rootUserName) .withSshKey(sshKeys.getSshPublicKey()) .withServicePrincipalClientId(servicePrincipalClientId) .withServicePrincipalSecret(servicePrincipalSecret) .defineAgentPool("agentpool") .withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2) .withAgentPoolVirtualMachineCount(1) .withAgentPoolMode(AgentPoolMode.SYSTEM) .attach() .withDnsPrefix("dns-" + aksName) .create(); Date t2 = new Date(); System.out.println("Created Azure Container Service (AKS) resource: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + kubernetesCluster.id()); Utils.print(kubernetesCluster); System.out.println("Creating an Azure Container Registry"); t1 = new Date(); Registry azureRegistry = azureResourceManager.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); t2 = new Date(); System.out.println("Created Azure Container Registry: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azureResourceManager, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); dockerClient.pullImageCmd(dockerImageName) .withTag(dockerImageTag) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); System.out.println("List local Docker images:"); List<Image> images = 
dockerClient.listImagesCmd().withShowAll(true).exec(); for (Image image : images) { System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId()); } CreateContainerResponse dockerContainerInstance = dockerClient.createContainerCmd(dockerImageName + ":" + dockerImageTag) .withName(dockerContainerName) .withCmd("/hello") .exec(); System.out.println("List Docker containers:"); List<Container> dockerContainers = dockerClient.listContainersCmd() .withShowAll(true) .exec(); for (Container container : dockerContainers) { System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId()); } String privateRepoUrl = azureRegistry.loginServerUrl() + "/samples/" + dockerContainerName; dockerClient.commitCmd(dockerContainerInstance.getId()) .withRepository(privateRepoUrl) .withTag("latest").exec(); dockerClient.removeContainerCmd(dockerContainerInstance.getId()) .withForce(true) .exec(); dockerClient.pushImageCmd(privateRepoUrl) .withAuthConfig(dockerClient.authConfig()) .exec(new PushImageResultCallback()).awaitSuccess(); try { dockerClient.removeImageCmd(dockerImageName + ":" + dockerImageTag).withForce(true).exec(); } catch (NotFoundException e) { } dockerClient.pullImageCmd(privateRepoUrl) .withAuthConfig(dockerClient.authConfig()) .exec(new PullImageResultCallback()).awaitCompletion(); System.out.println("List local Docker images after pulling sample image from the Azure Container Registry:"); images = dockerClient.listImagesCmd() .withShowAll(true) .exec(); for (Image image : images) { System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId()); } dockerClient.createContainerCmd(privateRepoUrl) .withName(dockerContainerName + "-private") .withCmd("/hello").exec(); System.out.println("List Docker containers after instantiating container from the Azure Container Registry sample image:"); dockerContainers = dockerClient.listContainersCmd() .withShowAll(true) .exec(); for (Container 
container : dockerContainers) { System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId()); } kubernetesCluster = azureResourceManager.kubernetesClusters().getByResourceGroup(rgName, aksName); System.out.println("Found Kubernetes master at: " + kubernetesCluster.fqdn()); byte[] kubeConfigContent = kubernetesCluster.adminKubeConfigContent(); File tempKubeConfigFile = File.createTempFile("kube", ".config", new File(System.getProperty("java.io.tmpdir"))); tempKubeConfigFile.deleteOnExit(); try (BufferedWriter buffOut = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(tempKubeConfigFile), StandardCharsets.UTF_8))) { buffOut.write(new String(kubeConfigContent, StandardCharsets.UTF_8)); } System.setProperty(Config.KUBERNETES_KUBECONFIG_FILE, tempKubeConfigFile.getPath()); Config config = new Config(); KubernetesClient kubernetesClient = new DefaultKubernetesClient(config); System.out.println(kubernetesClient.nodes().list()); Namespace ns = new NamespaceBuilder() .withNewMetadata() .withName(aksNamespace) .addToLabels("acr", "sample") .endMetadata() .build(); try { System.out.println("Created namespace" + kubernetesClient.namespaces().create(ns)); } catch (Exception e) { System.err.println(e.getMessage()); } ResourceManagerUtils.sleep(Duration.ofSeconds(5)); for (Namespace namespace : kubernetesClient.namespaces().list().getItems()) { System.out.println("\tFound Kubernetes namespace: " + namespace.toString()); } String basicAuth = new String(Base64.encodeBase64((acrCredentials.username() + ":" + acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)).getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8); HashMap<String, String> secretData = new HashMap<>(1); String dockerCfg = String.format("{ \"%s\": { \"auth\": \"%s\", \"email\": \"%s\" } }", azureRegistry.loginServerUrl(), basicAuth, "acrsample@azure.com"); dockerCfg = new String(Base64.encodeBase64(dockerCfg.getBytes("UTF-8")), "UTF-8"); 
secretData.put(".dockercfg", dockerCfg); SecretBuilder secretBuilder = new SecretBuilder() .withNewMetadata() .withName(aksSecretName) .withNamespace(aksNamespace) .endMetadata() .withData(secretData) .withType("kubernetes.io/dockercfg"); System.out.println("Creating new secret: " + kubernetesClient.secrets().inNamespace(aksNamespace).create(secretBuilder.build())); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); for (Secret kubeS : kubernetesClient.secrets().inNamespace(aksNamespace).list().getItems()) { System.out.println("\tFound secret: " + kubeS); } ReplicationController rc = new ReplicationControllerBuilder() .withNewMetadata() .withName("acrsample-rc") .withNamespace(aksNamespace) .addToLabels("acrsample-nginx", "nginx") .endMetadata() .withNewSpec() .withReplicas(2) .withNewTemplate() .withNewMetadata() .addToLabels("acrsample-nginx", "nginx") .endMetadata() .withNewSpec() .addNewImagePullSecret(aksSecretName) .addNewContainer() .withName("acrsample-pod-nginx") .withImage(privateRepoUrl) .addNewPort() .withContainerPort(80) .endPort() .endContainer() .endSpec() .endTemplate() .endSpec() .build(); System.out.println("Creating a replication controller: " + kubernetesClient.replicationControllers().inNamespace(aksNamespace).create(rc)); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); rc = kubernetesClient.replicationControllers().inNamespace(aksNamespace).withName("acrsample-rc").get(); System.out.println("Found replication controller: " + rc.toString()); for (Pod pod : kubernetesClient.pods().inNamespace(aksNamespace).list().getItems()) { System.out.println("\tFound Kubernetes pods: " + pod.toString()); } Service lbService = new ServiceBuilder() .withNewMetadata() .withName(aksLbIngressName) .withNamespace(aksNamespace) .endMetadata() .withNewSpec() .withType("LoadBalancer") .addNewPort() .withPort(80) .withProtocol("TCP") .endPort() .addToSelector("acrsample-nginx", "nginx") .endSpec() .build(); System.out.println("Creating a service: " + 
kubernetesClient.services().inNamespace(aksNamespace).create(lbService)); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); System.out.println("\tFound service: " + kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get()); int timeout = 30 * 60 * 1000; String matchIPV4 = "^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$"; while (timeout > 0) { try { List<LoadBalancerIngress> lbIngressList = kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get().getStatus().getLoadBalancer().getIngress(); if (lbIngressList != null && !lbIngressList.isEmpty() && lbIngressList.get(0) != null && lbIngressList.get(0).getIp().matches(matchIPV4)) { System.out.println("\tFound ingress IP: " + lbIngressList.get(0).getIp()); timeout = 0; } } catch (Exception e) { } if (timeout > 0) { timeout -= 30000; ResourceManagerUtils.sleep(Duration.ofSeconds(30)); } } kubernetesClient.namespaces().delete(ns); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
.withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2)
public static boolean runSample(AzureResourceManager azureResourceManager, String clientId, String secret) throws IOException, JSchException, InterruptedException { final String rgName = Utils.randomResourceName(azureResourceManager, "rgaks", 15); final String acrName = Utils.randomResourceName(azureResourceManager, "acrsample", 20); final String aksName = Utils.randomResourceName(azureResourceManager, "akssample", 30); final String rootUserName = "aksuser"; final Region region = Region.US_EAST; final String dockerImageName = "nginx"; final String dockerImageTag = "latest"; final String dockerContainerName = "acrsample-nginx"; String aksSecretName = "mysecret112233"; String aksNamespace = "acrsample"; String aksLbIngressName = "lb-acrsample"; String servicePrincipalClientId = clientId; String servicePrincipalSecret = secret; try { if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty() || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); servicePrincipalSecret = System.getenv("AZURE_CLIENT_SECRET"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty() || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); servicePrincipalSecret = Utils.getSecondaryServicePrincipalSecret(envSecondaryServicePrincipal); } } System.out.println("Creating an SSH private and public key pair"); SSHShell.SshPublicPrivateKey sshKeys = SSHShell.generateSSHKeys("", "ACS"); System.out.println("SSH private key value: %n" + sshKeys.getSshPrivateKey()); 
System.out.println("SSH public key value: %n" + sshKeys.getSshPublicKey()); System.out.println("Creating an Azure Container Service with managed Kubernetes cluster and one agent pool with one virtual machine"); Date t1 = new Date(); KubernetesCluster kubernetesCluster = azureResourceManager.kubernetesClusters().define(aksName) .withRegion(region) .withNewResourceGroup(rgName) .withDefaultVersion() .withRootUsername(rootUserName) .withSshKey(sshKeys.getSshPublicKey()) .withServicePrincipalClientId(servicePrincipalClientId) .withServicePrincipalSecret(servicePrincipalSecret) .defineAgentPool("agentpool") .withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2) .withAgentPoolVirtualMachineCount(1) .withAgentPoolMode(AgentPoolMode.SYSTEM) .attach() .withDnsPrefix("dns-" + aksName) .create(); Date t2 = new Date(); System.out.println("Created Azure Container Service (AKS) resource: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + kubernetesCluster.id()); Utils.print(kubernetesCluster); System.out.println("Creating an Azure Container Registry"); t1 = new Date(); Registry azureRegistry = azureResourceManager.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); t2 = new Date(); System.out.println("Created Azure Container Registry: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azureResourceManager, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); dockerClient.pullImageCmd(dockerImageName) .withTag(dockerImageTag) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); System.out.println("List local Docker images:"); List<Image> images = 
dockerClient.listImagesCmd().withShowAll(true).exec(); for (Image image : images) { System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId()); } CreateContainerResponse dockerContainerInstance = dockerClient.createContainerCmd(dockerImageName + ":" + dockerImageTag) .withName(dockerContainerName) .withCmd("/hello") .exec(); System.out.println("List Docker containers:"); List<Container> dockerContainers = dockerClient.listContainersCmd() .withShowAll(true) .exec(); for (Container container : dockerContainers) { System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId()); } String privateRepoUrl = azureRegistry.loginServerUrl() + "/samples/" + dockerContainerName; dockerClient.commitCmd(dockerContainerInstance.getId()) .withRepository(privateRepoUrl) .withTag("latest").exec(); dockerClient.removeContainerCmd(dockerContainerInstance.getId()) .withForce(true) .exec(); dockerClient.pushImageCmd(privateRepoUrl) .withAuthConfig(dockerClient.authConfig()) .exec(new PushImageResultCallback()).awaitSuccess(); try { dockerClient.removeImageCmd(dockerImageName + ":" + dockerImageTag).withForce(true).exec(); } catch (NotFoundException e) { } dockerClient.pullImageCmd(privateRepoUrl) .withAuthConfig(dockerClient.authConfig()) .exec(new PullImageResultCallback()).awaitCompletion(); System.out.println("List local Docker images after pulling sample image from the Azure Container Registry:"); images = dockerClient.listImagesCmd() .withShowAll(true) .exec(); for (Image image : images) { System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId()); } dockerClient.createContainerCmd(privateRepoUrl) .withName(dockerContainerName + "-private") .withCmd("/hello").exec(); System.out.println("List Docker containers after instantiating container from the Azure Container Registry sample image:"); dockerContainers = dockerClient.listContainersCmd() .withShowAll(true) .exec(); for (Container 
container : dockerContainers) { System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId()); } kubernetesCluster = azureResourceManager.kubernetesClusters().getByResourceGroup(rgName, aksName); System.out.println("Found Kubernetes master at: " + kubernetesCluster.fqdn()); byte[] kubeConfigContent = kubernetesCluster.adminKubeConfigContent(); File tempKubeConfigFile = File.createTempFile("kube", ".config", new File(System.getProperty("java.io.tmpdir"))); tempKubeConfigFile.deleteOnExit(); try (BufferedWriter buffOut = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(tempKubeConfigFile), StandardCharsets.UTF_8))) { buffOut.write(new String(kubeConfigContent, StandardCharsets.UTF_8)); } System.setProperty(Config.KUBERNETES_KUBECONFIG_FILE, tempKubeConfigFile.getPath()); Config config = new Config(); KubernetesClient kubernetesClient = new DefaultKubernetesClient(config); System.out.println(kubernetesClient.nodes().list()); Namespace ns = new NamespaceBuilder() .withNewMetadata() .withName(aksNamespace) .addToLabels("acr", "sample") .endMetadata() .build(); try { System.out.println("Created namespace" + kubernetesClient.namespaces().create(ns)); } catch (Exception e) { System.err.println(e.getMessage()); } ResourceManagerUtils.sleep(Duration.ofSeconds(5)); for (Namespace namespace : kubernetesClient.namespaces().list().getItems()) { System.out.println("\tFound Kubernetes namespace: " + namespace.toString()); } String basicAuth = new String(Base64.encodeBase64((acrCredentials.username() + ":" + acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)).getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8); HashMap<String, String> secretData = new HashMap<>(1); String dockerCfg = String.format("{ \"%s\": { \"auth\": \"%s\", \"email\": \"%s\" } }", azureRegistry.loginServerUrl(), basicAuth, "acrsample@azure.com"); dockerCfg = new String(Base64.encodeBase64(dockerCfg.getBytes("UTF-8")), "UTF-8"); 
secretData.put(".dockercfg", dockerCfg); SecretBuilder secretBuilder = new SecretBuilder() .withNewMetadata() .withName(aksSecretName) .withNamespace(aksNamespace) .endMetadata() .withData(secretData) .withType("kubernetes.io/dockercfg"); System.out.println("Creating new secret: " + kubernetesClient.secrets().inNamespace(aksNamespace).create(secretBuilder.build())); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); for (Secret kubeS : kubernetesClient.secrets().inNamespace(aksNamespace).list().getItems()) { System.out.println("\tFound secret: " + kubeS); } ReplicationController rc = new ReplicationControllerBuilder() .withNewMetadata() .withName("acrsample-rc") .withNamespace(aksNamespace) .addToLabels("acrsample-nginx", "nginx") .endMetadata() .withNewSpec() .withReplicas(2) .withNewTemplate() .withNewMetadata() .addToLabels("acrsample-nginx", "nginx") .endMetadata() .withNewSpec() .addNewImagePullSecret(aksSecretName) .addNewContainer() .withName("acrsample-pod-nginx") .withImage(privateRepoUrl) .addNewPort() .withContainerPort(80) .endPort() .endContainer() .endSpec() .endTemplate() .endSpec() .build(); System.out.println("Creating a replication controller: " + kubernetesClient.replicationControllers().inNamespace(aksNamespace).create(rc)); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); rc = kubernetesClient.replicationControllers().inNamespace(aksNamespace).withName("acrsample-rc").get(); System.out.println("Found replication controller: " + rc.toString()); for (Pod pod : kubernetesClient.pods().inNamespace(aksNamespace).list().getItems()) { System.out.println("\tFound Kubernetes pods: " + pod.toString()); } Service lbService = new ServiceBuilder() .withNewMetadata() .withName(aksLbIngressName) .withNamespace(aksNamespace) .endMetadata() .withNewSpec() .withType("LoadBalancer") .addNewPort() .withPort(80) .withProtocol("TCP") .endPort() .addToSelector("acrsample-nginx", "nginx") .endSpec() .build(); System.out.println("Creating a service: " + 
kubernetesClient.services().inNamespace(aksNamespace).create(lbService)); ResourceManagerUtils.sleep(Duration.ofSeconds(5)); System.out.println("\tFound service: " + kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get()); int timeout = 30 * 60 * 1000; String matchIPV4 = "^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$"; while (timeout > 0) { try { List<LoadBalancerIngress> lbIngressList = kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get().getStatus().getLoadBalancer().getIngress(); if (lbIngressList != null && !lbIngressList.isEmpty() && lbIngressList.get(0) != null && lbIngressList.get(0).getIp().matches(matchIPV4)) { System.out.println("\tFound ingress IP: " + lbIngressList.get(0).getIp()); timeout = 0; } } catch (Exception e) { } if (timeout > 0) { timeout -= 30000; ResourceManagerUtils.sleep(Duration.ofSeconds(30)); } } kubernetesClient.namespaces().delete(ns); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
class DeployImageFromContainerRegistryToKubernetes { /** * Main function which runs the actual sample. * * @param azureResourceManager instance of the azure client * @param clientId secondary service principal client ID * @param secret secondary service principal secret * @return true if sample runs successfully */ /** * Main entry point. * * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager, "", ""); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } }
class DeployImageFromContainerRegistryToKubernetes { /** * Main function which runs the actual sample. * * @param azureResourceManager instance of the azure client * @param clientId secondary service principal client ID * @param secret secondary service principal secret * @return true if sample runs successfully */ /** * Main entry point. * * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager, "", ""); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } }
Long term, we might probably try fix this docker file (or rather, use JAR deployment directly).
public static boolean runSample(AzureResourceManager azure, String clientId) throws IOException, InterruptedException { final Region region = Region.US_WEST; final String acrName = Utils.randomResourceName(azure, "acr", 20); final String appName = Utils.randomResourceName(azure, "webapp1-", 20); final String password = Utils.password(); final String rgName = Utils.randomResourceName(azure, "rg1NEMV_", 24); final String vaultName = Utils.randomResourceName(azure, "vault", 20); final String cosmosName = Utils.randomResourceName(azure, "cosmosdb", 20); String servicePrincipalClientId = clientId; try { System.out.println("Creating a CosmosDB..."); CosmosDBAccount cosmosDBAccount = azure.cosmosDBAccounts().define(cosmosName) .withRegion(region) .withNewResourceGroup(rgName) .withDataModelSql() .withStrongConsistency() .create(); System.out.println("Created CosmosDB"); Utils.print(cosmosDBAccount); ServicePrincipal servicePrincipal = azure.accessManagement().servicePrincipals() .define(appName) .withNewApplication("http: .definePasswordCredential("password") .withPasswordValue(password) .attach() .create(); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); } } Vault vault = azure.vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipalClientId) .allowSecretAllPermissions() .attach() .defineAccessPolicy() 
.forServicePrincipal(servicePrincipal) .allowSecretPermissions(SecretPermissions.GET, SecretPermissions.LIST) .attach() .create(); vault.secrets().define("azure-documentdb-uri") .withValue(cosmosDBAccount.documentEndpoint()) .create(); vault.secrets().define("azure-documentdb-key") .withValue(cosmosDBAccount.listKeys().primaryMasterKey()) .create(); vault.secrets().define("azure-documentdb-database") .withValue("tododb") .create(); System.out.println("Creating an Azure Container Registry"); long t1 = System.currentTimeMillis(); Registry azureRegistry = azure.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); long t2 = System.currentTimeMillis(); System.out.println("Created Azure Container Registry: (took " + ((t2 - t1) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azure, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); String imageName = "tomcat:7.0-slim"; String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp"; dockerClient.pullImageCmd(imageName) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); String imageId = dockerClient.inspectImageCmd(imageName).exec().getId(); dockerClient.tagImageCmd(imageId, privateRepoUrl, "latest").exec(); dockerClient.pushImageCmd(privateRepoUrl) .exec(new PushImageResultCallback()).awaitCompletion(); System.out.println("Creating web app " + appName + " in resource group " + rgName + "..."); WebApp app1 = azure.webApps() .define(appName) .withRegion(Region.US_WEST) .withNewResourceGroup(rgName) .withNewLinuxPlan(PricingTier.STANDARD_S1) .withPrivateRegistryImage(privateRepoUrl, azureRegistry.loginServerUrl()) .withCredentials(acrCredentials.username(), 
acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .withAppSetting("AZURE_KEYVAULT_URI", vault.vaultUri()) .withAppSetting("AZURE_KEYVAULT_CLIENT_ID", servicePrincipal.applicationId()) .withAppSetting("AZURE_KEYVAULT_CLIENT_KEY", password) .create(); System.out.println("Created web app " + app1.name()); Utils.print(app1); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp";
public static boolean runSample(AzureResourceManager azure, String clientId) throws IOException, InterruptedException { final Region region = Region.US_WEST; final String acrName = Utils.randomResourceName(azure, "acr", 20); final String appName = Utils.randomResourceName(azure, "webapp1-", 20); final String password = Utils.password(); final String rgName = Utils.randomResourceName(azure, "rg1NEMV_", 24); final String vaultName = Utils.randomResourceName(azure, "vault", 20); final String cosmosName = Utils.randomResourceName(azure, "cosmosdb", 20); String servicePrincipalClientId = clientId; try { System.out.println("Creating a CosmosDB..."); CosmosDBAccount cosmosDBAccount = azure.cosmosDBAccounts().define(cosmosName) .withRegion(region) .withNewResourceGroup(rgName) .withDataModelSql() .withStrongConsistency() .create(); System.out.println("Created CosmosDB"); Utils.print(cosmosDBAccount); ServicePrincipal servicePrincipal = azure.accessManagement().servicePrincipals() .define(appName) .withNewApplication("http: .definePasswordCredential("password") .withPasswordValue(password) .attach() .create(); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); } } Vault vault = azure.vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipalClientId) .allowSecretAllPermissions() .attach() .defineAccessPolicy() 
.forServicePrincipal(servicePrincipal) .allowSecretPermissions(SecretPermissions.GET, SecretPermissions.LIST) .attach() .create(); vault.secrets().define("azure-documentdb-uri") .withValue(cosmosDBAccount.documentEndpoint()) .create(); vault.secrets().define("azure-documentdb-key") .withValue(cosmosDBAccount.listKeys().primaryMasterKey()) .create(); vault.secrets().define("azure-documentdb-database") .withValue("tododb") .create(); System.out.println("Creating an Azure Container Registry"); long t1 = System.currentTimeMillis(); Registry azureRegistry = azure.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); long t2 = System.currentTimeMillis(); System.out.println("Created Azure Container Registry: (took " + ((t2 - t1) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azure, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); String imageName = "tomcat:7.0-slim"; String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp"; dockerClient.pullImageCmd(imageName) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); String imageId = dockerClient.inspectImageCmd(imageName).exec().getId(); dockerClient.tagImageCmd(imageId, privateRepoUrl, "latest").exec(); dockerClient.pushImageCmd(privateRepoUrl) .exec(new PushImageResultCallback()).awaitCompletion(); System.out.println("Creating web app " + appName + " in resource group " + rgName + "..."); WebApp app1 = azure.webApps() .define(appName) .withRegion(Region.US_WEST) .withNewResourceGroup(rgName) .withNewLinuxPlan(PricingTier.STANDARD_S1) .withPrivateRegistryImage(privateRepoUrl, azureRegistry.loginServerUrl()) .withCredentials(acrCredentials.username(), 
acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .withAppSetting("AZURE_KEYVAULT_URI", vault.vaultUri()) .withAppSetting("AZURE_KEYVAULT_CLIENT_ID", servicePrincipal.applicationId()) .withAppSetting("AZURE_KEYVAULT_CLIENT_KEY", password) .create(); System.out.println("Created web app " + app1.name()); Utils.print(app1); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
/**
 * Sample entry-point class: authenticates with DefaultAzureCredential and runs the
 * Linux web app + Cosmos DB (MSI) sample against the default subscription.
 */
class ManageLinuxWebAppCosmosDbByMsi {

    /**
     * Main entry point. Builds the AzureResourceManager client from environment-based
     * credentials and delegates to {@code runSample}; any failure is printed and swallowed
     * so the sample exits cleanly.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();

            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            // Empty client ID: runSample falls back to environment-provided credentials.
            runSample(azureResourceManager, "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
/**
 * Sample entry-point class: authenticates with DefaultAzureCredential and runs the
 * Linux web app + Cosmos DB (MSI) sample against the default subscription.
 */
class ManageLinuxWebAppCosmosDbByMsi {

    /**
     * Main entry point. Builds the AzureResourceManager client from environment-based
     * credentials and delegates to {@code runSample}; any failure is printed and swallowed
     * so the sample exits cleanly.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();

            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            // Empty client ID: runSample falls back to environment-provided credentials.
            runSample(azureResourceManager, "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
Thanks, that makes sense. I will keep it in mind and add it the next time I touch the CTL; I need this to get merged now.
/**
 * Pre-populates every container with {@code numberOfPreCreatedDocuments} documents and
 * records the created items in {@code docsToRead} so the read workload can target them.
 * Creation failures are logged and skipped (the failed item is simply absent from the map).
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // Pass the Throwable itself so SLF4J records the stack trace; the
                    // previous call passed getMessage() with no "{}" placeholder, so the
                    // error detail was silently dropped.
                    logger.error("Error during pre populating item", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Merge with a concurrency of 100 and block until the whole batch completes.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // successCount already excludes failures (failed items resume with Mono.empty()
        // and never hit doOnSuccess), so subtracting failureCount would double-count.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
logger.error("Error during pre populating item ", throwable.getMessage());
/**
 * Pre-populates every container with {@code numberOfPreCreatedDocuments} documents and
 * records the created items in {@code docsToRead} so the read workload can target them.
 * Creation failures are logged and skipped (the failed item is simply absent from the map).
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // Pass the Throwable itself so SLF4J records the stack trace; the
                    // previous call passed getMessage() with no "{}" placeholder, so the
                    // error detail was silently dropped.
                    logger.error("Error during pre populating item", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Merge with a concurrency of 100 and block until the whole batch completes.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // successCount already excludes failures (failed items resume with Mono.empty()
        // and never hit doOnSuccess), so subtracting failureCount would double-count.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
The `SYSTEM` agent pool doesn't support a 1-CPU VM size.
/**
 * Runs the AKS + ACR sample: creates a managed Kubernetes cluster and a container
 * registry, pushes a sample image to the registry, then deploys it to the cluster via
 * a replication controller fronted by a load-balancer service, polling for the ingress IP.
 *
 * @param azureResourceManager instance of the Azure client
 * @param clientId service principal client ID (blank to fall back to environment)
 * @param secret service principal secret (blank to fall back to environment)
 * @return true if the sample runs to completion
 * @throws IOException on kube-config file I/O failure
 * @throws JSchException on SSH key generation failure
 * @throws InterruptedException if a Docker pull/push wait is interrupted
 */
public static boolean runSample(AzureResourceManager azureResourceManager, String clientId, String secret)
        throws IOException, JSchException, InterruptedException {
    final String rgName = Utils.randomResourceName(azureResourceManager, "rgaks", 15);
    final String acrName = Utils.randomResourceName(azureResourceManager, "acrsample", 20);
    final String aksName = Utils.randomResourceName(azureResourceManager, "akssample", 30);
    final String rootUserName = "aksuser";
    final Region region = Region.US_EAST;
    final String dockerImageName = "nginx";
    final String dockerImageTag = "latest";
    final String dockerContainerName = "acrsample-nginx";
    String aksSecretName = "mysecret112233";
    String aksNamespace = "acrsample";
    String aksLbIngressName = "lb-acrsample";
    String servicePrincipalClientId = clientId;
    String servicePrincipalSecret = secret;

    try {
        // Resolve service principal credentials: explicit args, then AZURE_CLIENT_ID/SECRET,
        // then an auth file referenced by AZURE_AUTH_LOCATION_2 / AZURE_AUTH_LOCATION.
        if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()
                || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) {
            servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID");
            servicePrincipalSecret = System.getenv("AZURE_CLIENT_SECRET");
            if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()
                    || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) {
                String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2");
                // BUG FIX: the original tested "!envSecondaryServicePrincipal.isEmpty()",
                // which forced the fallback for every non-empty (i.e. valid) path. Fall
                // back only when the variable is unset, empty, or points to a missing file.
                if (envSecondaryServicePrincipal == null
                        || envSecondaryServicePrincipal.isEmpty()
                        || !Files.exists(Paths.get(envSecondaryServicePrincipal))) {
                    envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION");
                }
                servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal);
                servicePrincipalSecret = Utils.getSecondaryServicePrincipalSecret(envSecondaryServicePrincipal);
            }
        }

        System.out.println("Creating an SSH private and public key pair");
        SSHShell.SshPublicPrivateKey sshKeys = SSHShell.generateSSHKeys("", "ACS");
        // printf so "%n" is interpreted as a line separator (println printed it literally).
        System.out.printf("SSH private key value: %n%s%n", sshKeys.getSshPrivateKey());
        System.out.printf("SSH public key value: %n%s%n", sshKeys.getSshPublicKey());

        System.out.println("Creating an Azure Container Service with managed Kubernetes cluster and one agent pool with one virtual machine");
        Date t1 = new Date();
        KubernetesCluster kubernetesCluster = azureResourceManager.kubernetesClusters().define(aksName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withDefaultVersion()
            .withRootUsername(rootUserName)
            .withSshKey(sshKeys.getSshPublicKey())
            .withServicePrincipalClientId(servicePrincipalClientId)
            .withServicePrincipalSecret(servicePrincipalSecret)
            .defineAgentPool("agentpool")
                // The SYSTEM agent pool mode requires a VM size with more than 1 CPU.
                .withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2)
                .withAgentPoolVirtualMachineCount(1)
                .withAgentPoolMode(AgentPoolMode.SYSTEM)
                .attach()
            .withDnsPrefix("dns-" + aksName)
            .create();
        Date t2 = new Date();
        System.out.println("Created Azure Container Service (AKS) resource: (took "
            + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + kubernetesCluster.id());
        Utils.print(kubernetesCluster);

        System.out.println("Creating an Azure Container Registry");
        t1 = new Date();
        Registry azureRegistry = azureResourceManager.containerRegistries().define(acrName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withBasicSku()
            .withRegistryNameAsAdminUser()
            .create();
        t2 = new Date();
        System.out.println("Created Azure Container Registry: (took "
            + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + azureRegistry.id());
        Utils.print(azureRegistry);

        RegistryCredentials acrCredentials = azureRegistry.getCredentials();
        DockerClient dockerClient = DockerUtils.createDockerClient(azureResourceManager, rgName, region,
            azureRegistry.loginServerUrl(), acrCredentials.username(),
            acrCredentials.accessKeys().get(AccessKeyType.PRIMARY));

        dockerClient.pullImageCmd(dockerImageName)
            .withTag(dockerImageTag)
            .withAuthConfig(new AuthConfig())
            .exec(new PullImageResultCallback())
            .awaitCompletion();

        System.out.println("List local Docker images:");
        List<Image> images = dockerClient.listImagesCmd().withShowAll(true).exec();
        for (Image image : images) {
            System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId());
        }

        CreateContainerResponse dockerContainerInstance =
            dockerClient.createContainerCmd(dockerImageName + ":" + dockerImageTag)
                .withName(dockerContainerName)
                .withCmd("/hello")
                .exec();
        System.out.println("List Docker containers:");
        List<Container> dockerContainers = dockerClient.listContainersCmd()
            .withShowAll(true)
            .exec();
        for (Container container : dockerContainers) {
            System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId());
        }

        // Commit the container as a new image under the private registry's repo path.
        String privateRepoUrl = azureRegistry.loginServerUrl() + "/samples/" + dockerContainerName;
        dockerClient.commitCmd(dockerContainerInstance.getId())
            .withRepository(privateRepoUrl)
            .withTag("latest").exec();

        dockerClient.removeContainerCmd(dockerContainerInstance.getId())
            .withForce(true)
            .exec();

        dockerClient.pushImageCmd(privateRepoUrl)
            .withAuthConfig(dockerClient.authConfig())
            .exec(new PushImageResultCallback()).awaitSuccess();

        try {
            dockerClient.removeImageCmd(dockerImageName + ":" + dockerImageTag).withForce(true).exec();
        } catch (NotFoundException ignored) {
            // The image may already be gone; nothing to clean up.
        }
        dockerClient.pullImageCmd(privateRepoUrl)
            .withAuthConfig(dockerClient.authConfig())
            .exec(new PullImageResultCallback()).awaitCompletion();

        System.out.println("List local Docker images after pulling sample image from the Azure Container Registry:");
        images = dockerClient.listImagesCmd()
            .withShowAll(true)
            .exec();
        for (Image image : images) {
            System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId());
        }

        dockerClient.createContainerCmd(privateRepoUrl)
            .withName(dockerContainerName + "-private")
            .withCmd("/hello").exec();

        System.out.println("List Docker containers after instantiating container from the Azure Container Registry sample image:");
        dockerContainers = dockerClient.listContainersCmd()
            .withShowAll(true)
            .exec();
        for (Container container : dockerContainers) {
            System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId());
        }

        kubernetesCluster = azureResourceManager.kubernetesClusters().getByResourceGroup(rgName, aksName);
        System.out.println("Found Kubernetes master at: " + kubernetesCluster.fqdn());

        // Write the admin kube config to a temp file so the fabric8 client can pick it up.
        byte[] kubeConfigContent = kubernetesCluster.adminKubeConfigContent();
        File tempKubeConfigFile = File.createTempFile("kube", ".config", new File(System.getProperty("java.io.tmpdir")));
        tempKubeConfigFile.deleteOnExit();
        try (BufferedWriter buffOut = new BufferedWriter(new OutputStreamWriter(
                new FileOutputStream(tempKubeConfigFile), StandardCharsets.UTF_8))) {
            buffOut.write(new String(kubeConfigContent, StandardCharsets.UTF_8));
        }

        System.setProperty(Config.KUBERNETES_KUBECONFIG_FILE, tempKubeConfigFile.getPath());
        Config config = new Config();
        KubernetesClient kubernetesClient = new DefaultKubernetesClient(config);

        System.out.println(kubernetesClient.nodes().list());

        Namespace ns = new NamespaceBuilder()
            .withNewMetadata()
                .withName(aksNamespace)
                .addToLabels("acr", "sample")
                .endMetadata()
            .build();
        try {
            System.out.println("Created namespace" + kubernetesClient.namespaces().create(ns));
        } catch (Exception e) {
            // Namespace may already exist; log and continue.
            System.err.println(e.getMessage());
        }

        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        for (Namespace namespace : kubernetesClient.namespaces().list().getItems()) {
            System.out.println("\tFound Kubernetes namespace: " + namespace.toString());
        }

        // Build a ".dockercfg" pull secret for the private registry.
        // StandardCharsets.UTF_8 replaces the throwing "UTF-8" name lookup.
        String basicAuth = new String(Base64.encodeBase64((acrCredentials.username() + ":"
            + acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)).getBytes(StandardCharsets.UTF_8)),
            StandardCharsets.UTF_8);
        HashMap<String, String> secretData = new HashMap<>(1);
        String dockerCfg = String.format("{ \"%s\": { \"auth\": \"%s\", \"email\": \"%s\" } }",
            azureRegistry.loginServerUrl(),
            basicAuth,
            "acrsample@azure.com");
        dockerCfg = new String(Base64.encodeBase64(dockerCfg.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
        secretData.put(".dockercfg", dockerCfg);
        SecretBuilder secretBuilder = new SecretBuilder()
            .withNewMetadata()
                .withName(aksSecretName)
                .withNamespace(aksNamespace)
                .endMetadata()
            .withData(secretData)
            .withType("kubernetes.io/dockercfg");
        System.out.println("Creating new secret: "
            + kubernetesClient.secrets().inNamespace(aksNamespace).create(secretBuilder.build()));

        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        for (Secret kubeS : kubernetesClient.secrets().inNamespace(aksNamespace).list().getItems()) {
            System.out.println("\tFound secret: " + kubeS);
        }

        // Two replicas of the private image, pulled with the secret created above.
        ReplicationController rc = new ReplicationControllerBuilder()
            .withNewMetadata()
                .withName("acrsample-rc")
                .withNamespace(aksNamespace)
                .addToLabels("acrsample-nginx", "nginx")
                .endMetadata()
            .withNewSpec()
                .withReplicas(2)
                .withNewTemplate()
                    .withNewMetadata()
                        .addToLabels("acrsample-nginx", "nginx")
                        .endMetadata()
                    .withNewSpec()
                        .addNewImagePullSecret(aksSecretName)
                        .addNewContainer()
                            .withName("acrsample-pod-nginx")
                            .withImage(privateRepoUrl)
                            .addNewPort()
                                .withContainerPort(80)
                                .endPort()
                            .endContainer()
                        .endSpec()
                    .endTemplate()
                .endSpec()
            .build();
        System.out.println("Creating a replication controller: "
            + kubernetesClient.replicationControllers().inNamespace(aksNamespace).create(rc));
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        rc = kubernetesClient.replicationControllers().inNamespace(aksNamespace).withName("acrsample-rc").get();
        System.out.println("Found replication controller: " + rc.toString());
        for (Pod pod : kubernetesClient.pods().inNamespace(aksNamespace).list().getItems()) {
            System.out.println("\tFound Kubernetes pods: " + pod.toString());
        }

        Service lbService = new ServiceBuilder()
            .withNewMetadata()
                .withName(aksLbIngressName)
                .withNamespace(aksNamespace)
                .endMetadata()
            .withNewSpec()
                .withType("LoadBalancer")
                .addNewPort()
                    .withPort(80)
                    .withProtocol("TCP")
                    .endPort()
                .addToSelector("acrsample-nginx", "nginx")
                .endSpec()
            .build();
        System.out.println("Creating a service: "
            + kubernetesClient.services().inNamespace(aksNamespace).create(lbService));
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        System.out.println("\tFound service: "
            + kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get());

        // Poll up to 30 minutes (in 30-second steps) for the load balancer ingress IP.
        int timeout = 30 * 60 * 1000;
        String matchIPV4 = "^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$";
        while (timeout > 0) {
            try {
                List<LoadBalancerIngress> lbIngressList = kubernetesClient.services()
                    .inNamespace(aksNamespace).withName(aksLbIngressName).get()
                    .getStatus().getLoadBalancer().getIngress();
                if (lbIngressList != null && !lbIngressList.isEmpty() && lbIngressList.get(0) != null
                        && lbIngressList.get(0).getIp().matches(matchIPV4)) {
                    System.out.println("\tFound ingress IP: " + lbIngressList.get(0).getIp());
                    timeout = 0;
                }
            } catch (Exception ignored) {
                // Ingress may not be populated yet; keep polling until the timeout elapses.
            }
            if (timeout > 0) {
                timeout -= 30000;
                ResourceManagerUtils.sleep(Duration.ofSeconds(30));
            }
        }

        kubernetesClient.namespaces().delete(ns);
        return true;
    } finally {
        try {
            System.out.println("Deleting Resource Group: " + rgName);
            azureResourceManager.resourceGroups().beginDeleteByName(rgName);
            System.out.println("Deleted Resource Group: " + rgName);
        } catch (NullPointerException npe) {
            System.out.println("Did not create any resources in Azure. No clean up is necessary");
        } catch (Exception g) {
            g.printStackTrace();
        }
    }
}
.withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2)
/**
 * Main function which runs the actual sample: creates an AKS (managed Kubernetes) cluster and
 * an Azure Container Registry, commits and pushes a sample nginx-based image to the registry,
 * and then deploys that private image to the cluster behind a LoadBalancer service.
 *
 * <p>All Azure resources are created in a temporary resource group which is deleted (best
 * effort) in the {@code finally} block.
 *
 * @param azureResourceManager instance of the azure client
 * @param clientId secondary service principal client ID (null/empty falls back to environment
 *        variables and auth-location files)
 * @param secret secondary service principal secret (same fallback behavior)
 * @return true if sample runs successfully
 * @throws IOException if the temporary kube config file cannot be written
 * @throws JSchException presumably from SSH key generation in SSHShell — declared by callees
 * @throws InterruptedException if a Docker pull/push is interrupted while awaiting completion
 */
public static boolean runSample(AzureResourceManager azureResourceManager, String clientId, String secret) throws IOException, JSchException, InterruptedException {
    final String rgName = Utils.randomResourceName(azureResourceManager, "rgaks", 15);
    final String acrName = Utils.randomResourceName(azureResourceManager, "acrsample", 20);
    final String aksName = Utils.randomResourceName(azureResourceManager, "akssample", 30);
    final String rootUserName = "aksuser";
    final Region region = Region.US_EAST;
    final String dockerImageName = "nginx";
    final String dockerImageTag = "latest";
    final String dockerContainerName = "acrsample-nginx";
    String aksSecretName = "mysecret112233";
    String aksNamespace = "acrsample";
    String aksLbIngressName = "lb-acrsample";
    String servicePrincipalClientId = clientId;
    String servicePrincipalSecret = secret;
    try {
        // Resolve service principal credentials: explicit arguments win, then the
        // AZURE_CLIENT_ID/AZURE_CLIENT_SECRET environment variables, then an auth file.
        if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()
            || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) {
            servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID");
            servicePrincipalSecret = System.getenv("AZURE_CLIENT_SECRET");
            if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()
                || servicePrincipalSecret == null || servicePrincipalSecret.isEmpty()) {
                String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2");
                // FIX: the original tested !envSecondaryServicePrincipal.isEmpty(), which fell
                // back to AZURE_AUTH_LOCATION precisely when AZURE_AUTH_LOCATION_2 *was*
                // provided. Fall back only when the secondary location is missing, empty, or
                // does not point to an existing file.
                if (envSecondaryServicePrincipal == null || envSecondaryServicePrincipal.isEmpty()
                    || !Files.exists(Paths.get(envSecondaryServicePrincipal))) {
                    envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION");
                }
                servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal);
                servicePrincipalSecret = Utils.getSecondaryServicePrincipalSecret(envSecondaryServicePrincipal);
            }
        }

        System.out.println("Creating an SSH private and public key pair");
        SSHShell.SshPublicPrivateKey sshKeys = SSHShell.generateSSHKeys("", "ACS");
        System.out.println("SSH private key value: %n" + sshKeys.getSshPrivateKey());
        System.out.println("SSH public key value: %n" + sshKeys.getSshPublicKey());

        // Create the managed Kubernetes cluster with a single-VM system agent pool.
        System.out.println("Creating an Azure Container Service with managed Kubernetes cluster and one agent pool with one virtual machine");
        Date t1 = new Date();
        KubernetesCluster kubernetesCluster = azureResourceManager.kubernetesClusters().define(aksName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withDefaultVersion()
            .withRootUsername(rootUserName)
            .withSshKey(sshKeys.getSshPublicKey())
            .withServicePrincipalClientId(servicePrincipalClientId)
            .withServicePrincipalSecret(servicePrincipalSecret)
            .defineAgentPool("agentpool")
                .withVirtualMachineSize(ContainerServiceVMSizeTypes.STANDARD_D2_V2)
                .withAgentPoolVirtualMachineCount(1)
                .withAgentPoolMode(AgentPoolMode.SYSTEM)
                .attach()
            .withDnsPrefix("dns-" + aksName)
            .create();
        Date t2 = new Date();
        System.out.println("Created Azure Container Service (AKS) resource: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + kubernetesCluster.id());
        Utils.print(kubernetesCluster);

        // Create an admin-enabled Azure Container Registry to host the sample image.
        System.out.println("Creating an Azure Container Registry");
        t1 = new Date();
        Registry azureRegistry = azureResourceManager.containerRegistries().define(acrName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withBasicSku()
            .withRegistryNameAsAdminUser()
            .create();
        t2 = new Date();
        System.out.println("Created Azure Container Registry: (took " + ((t2.getTime() - t1.getTime()) / 1000) + " seconds) " + azureRegistry.id());
        Utils.print(azureRegistry);

        RegistryCredentials acrCredentials = azureRegistry.getCredentials();
        DockerClient dockerClient = DockerUtils.createDockerClient(azureResourceManager, rgName, region,
            azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY));

        // Pull the public nginx image locally.
        dockerClient.pullImageCmd(dockerImageName)
            .withTag(dockerImageTag)
            .withAuthConfig(new AuthConfig())
            .exec(new PullImageResultCallback())
            .awaitCompletion();

        System.out.println("List local Docker images:");
        List<Image> images = dockerClient.listImagesCmd().withShowAll(true).exec();
        for (Image image : images) {
            System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId());
        }

        CreateContainerResponse dockerContainerInstance = dockerClient.createContainerCmd(dockerImageName + ":" + dockerImageTag)
            .withName(dockerContainerName)
            .withCmd("/hello")
            .exec();
        System.out.println("List Docker containers:");
        List<Container> dockerContainers = dockerClient.listContainersCmd()
            .withShowAll(true)
            .exec();
        for (Container container : dockerContainers) {
            System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId());
        }

        // Commit the container as a new image tagged for the private registry, then push it.
        String privateRepoUrl = azureRegistry.loginServerUrl() + "/samples/" + dockerContainerName;
        dockerClient.commitCmd(dockerContainerInstance.getId())
            .withRepository(privateRepoUrl)
            .withTag("latest").exec();
        dockerClient.removeContainerCmd(dockerContainerInstance.getId())
            .withForce(true)
            .exec();
        dockerClient.pushImageCmd(privateRepoUrl)
            .withAuthConfig(dockerClient.authConfig())
            .exec(new PushImageResultCallback()).awaitSuccess();

        try {
            dockerClient.removeImageCmd(dockerImageName + ":" + dockerImageTag).withForce(true).exec();
        } catch (NotFoundException e) {
            // Best effort: the local image may already have been removed.
        }

        // Pull the image back from the private registry to prove the round trip works.
        dockerClient.pullImageCmd(privateRepoUrl)
            .withAuthConfig(dockerClient.authConfig())
            .exec(new PullImageResultCallback()).awaitCompletion();

        System.out.println("List local Docker images after pulling sample image from the Azure Container Registry:");
        images = dockerClient.listImagesCmd()
            .withShowAll(true)
            .exec();
        for (Image image : images) {
            System.out.format("\tFound Docker image %s (%s)%n", image.getRepoTags()[0], image.getId());
        }

        dockerClient.createContainerCmd(privateRepoUrl)
            .withName(dockerContainerName + "-private")
            .withCmd("/hello").exec();
        System.out.println("List Docker containers after instantiating container from the Azure Container Registry sample image:");
        dockerContainers = dockerClient.listContainersCmd()
            .withShowAll(true)
            .exec();
        for (Container container : dockerContainers) {
            System.out.format("\tFound Docker container %s (%s)%n", container.getImage(), container.getId());
        }

        // Write the cluster's admin kube config to a temp file and point the Kubernetes client at it.
        kubernetesCluster = azureResourceManager.kubernetesClusters().getByResourceGroup(rgName, aksName);
        System.out.println("Found Kubernetes master at: " + kubernetesCluster.fqdn());
        byte[] kubeConfigContent = kubernetesCluster.adminKubeConfigContent();
        File tempKubeConfigFile = File.createTempFile("kube", ".config", new File(System.getProperty("java.io.tmpdir")));
        tempKubeConfigFile.deleteOnExit();
        try (BufferedWriter buffOut = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(tempKubeConfigFile), StandardCharsets.UTF_8))) {
            buffOut.write(new String(kubeConfigContent, StandardCharsets.UTF_8));
        }
        System.setProperty(Config.KUBERNETES_KUBECONFIG_FILE, tempKubeConfigFile.getPath());
        Config config = new Config();
        KubernetesClient kubernetesClient = new DefaultKubernetesClient(config);
        System.out.println(kubernetesClient.nodes().list());

        // Create a dedicated namespace for the sample.
        Namespace ns = new NamespaceBuilder()
            .withNewMetadata()
                .withName(aksNamespace)
                .addToLabels("acr", "sample")
            .endMetadata()
            .build();
        try {
            System.out.println("Created namespace" + kubernetesClient.namespaces().create(ns));
        } catch (Exception e) {
            System.err.println(e.getMessage());
        }
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        for (Namespace namespace : kubernetesClient.namespaces().list().getItems()) {
            System.out.println("\tFound Kubernetes namespace: " + namespace.toString());
        }

        // Create a ".dockercfg"-style image pull secret so the cluster can pull from ACR.
        String basicAuth = new String(Base64.encodeBase64((acrCredentials.username() + ":" + acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)).getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
        HashMap<String, String> secretData = new HashMap<>(1);
        String dockerCfg = String.format("{ \"%s\": { \"auth\": \"%s\", \"email\": \"%s\" } }",
            azureRegistry.loginServerUrl(),
            basicAuth,
            "acrsample@azure.com");
        dockerCfg = new String(Base64.encodeBase64(dockerCfg.getBytes("UTF-8")), "UTF-8");
        secretData.put(".dockercfg", dockerCfg);
        SecretBuilder secretBuilder = new SecretBuilder()
            .withNewMetadata()
                .withName(aksSecretName)
                .withNamespace(aksNamespace)
            .endMetadata()
            .withData(secretData)
            .withType("kubernetes.io/dockercfg");
        System.out.println("Creating new secret: " + kubernetesClient.secrets().inNamespace(aksNamespace).create(secretBuilder.build()));
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        for (Secret kubeS : kubernetesClient.secrets().inNamespace(aksNamespace).list().getItems()) {
            System.out.println("\tFound secret: " + kubeS);
        }

        // Deploy two replicas of the private image via a replication controller.
        ReplicationController rc = new ReplicationControllerBuilder()
            .withNewMetadata()
                .withName("acrsample-rc")
                .withNamespace(aksNamespace)
                .addToLabels("acrsample-nginx", "nginx")
            .endMetadata()
            .withNewSpec()
                .withReplicas(2)
                .withNewTemplate()
                    .withNewMetadata()
                        .addToLabels("acrsample-nginx", "nginx")
                    .endMetadata()
                    .withNewSpec()
                        .addNewImagePullSecret(aksSecretName)
                        .addNewContainer()
                            .withName("acrsample-pod-nginx")
                            .withImage(privateRepoUrl)
                            .addNewPort()
                                .withContainerPort(80)
                            .endPort()
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
        System.out.println("Creating a replication controller: " + kubernetesClient.replicationControllers().inNamespace(aksNamespace).create(rc));
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        rc = kubernetesClient.replicationControllers().inNamespace(aksNamespace).withName("acrsample-rc").get();
        System.out.println("Found replication controller: " + rc.toString());
        for (Pod pod : kubernetesClient.pods().inNamespace(aksNamespace).list().getItems()) {
            System.out.println("\tFound Kubernetes pods: " + pod.toString());
        }

        // Expose the pods through a LoadBalancer service on port 80.
        Service lbService = new ServiceBuilder()
            .withNewMetadata()
                .withName(aksLbIngressName)
                .withNamespace(aksNamespace)
            .endMetadata()
            .withNewSpec()
                .withType("LoadBalancer")
                .addNewPort()
                    .withPort(80)
                    .withProtocol("TCP")
                .endPort()
                .addToSelector("acrsample-nginx", "nginx")
            .endSpec()
            .build();
        System.out.println("Creating a service: " + kubernetesClient.services().inNamespace(aksNamespace).create(lbService));
        ResourceManagerUtils.sleep(Duration.ofSeconds(5));
        System.out.println("\tFound service: " + kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get());

        // Poll (up to 30 minutes, every 30 seconds) until the load balancer is assigned an
        // IPv4 ingress address.
        int timeout = 30 * 60 * 1000;
        String matchIPV4 = "^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$";
        while (timeout > 0) {
            try {
                List<LoadBalancerIngress> lbIngressList = kubernetesClient.services().inNamespace(aksNamespace).withName(aksLbIngressName).get().getStatus().getLoadBalancer().getIngress();
                if (lbIngressList != null && !lbIngressList.isEmpty() && lbIngressList.get(0) != null
                    && lbIngressList.get(0).getIp().matches(matchIPV4)) {
                    System.out.println("\tFound ingress IP: " + lbIngressList.get(0).getIp());
                    timeout = 0;
                }
            } catch (Exception e) {
                // Ignore transient errors while polling for the ingress IP.
            }
            if (timeout > 0) {
                timeout -= 30000;
                ResourceManagerUtils.sleep(Duration.ofSeconds(30));
            }
        }

        kubernetesClient.namespaces().delete(ns);
        return true;
    } finally {
        // Best-effort cleanup of everything the sample created.
        try {
            System.out.println("Deleting Resource Group: " + rgName);
            azureResourceManager.resourceGroups().beginDeleteByName(rgName);
            System.out.println("Deleted Resource Group: " + rgName);
        } catch (NullPointerException npe) {
            System.out.println("Did not create any resources in Azure. No clean up is necessary");
        } catch (Exception g) {
            g.printStackTrace();
        }
    }
}
/**
 * Azure sample entry point: authenticates via the default Azure credential chain, selects the
 * default subscription, and delegates to {@code runSample} (defined elsewhere in this class)
 * with empty service principal overrides so credentials are resolved from the environment.
 */
class DeployImageFromContainerRegistryToKubernetes {
    /**
     * Main entry point.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();
            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());
            // Empty clientId/secret: runSample falls back to environment-based credentials.
            runSample(azureResourceManager, "", "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
/**
 * Azure sample entry point: authenticates via the default Azure credential chain, selects the
 * default subscription, and delegates to {@code runSample} (defined elsewhere in this class)
 * with empty service principal overrides so credentials are resolved from the environment.
 */
class DeployImageFromContainerRegistryToKubernetes {
    /**
     * Main entry point.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();
            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());
            // Empty clientId/secret: runSample falls back to environment-based credentials.
            runSample(azureResourceManager, "", "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
I think using a JAR might be better here, so that it corresponds with the Windows version.
public static boolean runSample(AzureResourceManager azure, String clientId) throws IOException, InterruptedException { final Region region = Region.US_WEST; final String acrName = Utils.randomResourceName(azure, "acr", 20); final String appName = Utils.randomResourceName(azure, "webapp1-", 20); final String password = Utils.password(); final String rgName = Utils.randomResourceName(azure, "rg1NEMV_", 24); final String vaultName = Utils.randomResourceName(azure, "vault", 20); final String cosmosName = Utils.randomResourceName(azure, "cosmosdb", 20); String servicePrincipalClientId = clientId; try { System.out.println("Creating a CosmosDB..."); CosmosDBAccount cosmosDBAccount = azure.cosmosDBAccounts().define(cosmosName) .withRegion(region) .withNewResourceGroup(rgName) .withDataModelSql() .withStrongConsistency() .create(); System.out.println("Created CosmosDB"); Utils.print(cosmosDBAccount); ServicePrincipal servicePrincipal = azure.accessManagement().servicePrincipals() .define(appName) .withNewApplication("http: .definePasswordCredential("password") .withPasswordValue(password) .attach() .create(); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); } } Vault vault = azure.vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipalClientId) .allowSecretAllPermissions() .attach() .defineAccessPolicy() 
.forServicePrincipal(servicePrincipal) .allowSecretPermissions(SecretPermissions.GET, SecretPermissions.LIST) .attach() .create(); vault.secrets().define("azure-documentdb-uri") .withValue(cosmosDBAccount.documentEndpoint()) .create(); vault.secrets().define("azure-documentdb-key") .withValue(cosmosDBAccount.listKeys().primaryMasterKey()) .create(); vault.secrets().define("azure-documentdb-database") .withValue("tododb") .create(); System.out.println("Creating an Azure Container Registry"); long t1 = System.currentTimeMillis(); Registry azureRegistry = azure.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); long t2 = System.currentTimeMillis(); System.out.println("Created Azure Container Registry: (took " + ((t2 - t1) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azure, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); String imageName = "tomcat:7.0-slim"; String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp"; dockerClient.pullImageCmd(imageName) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); String imageId = dockerClient.inspectImageCmd(imageName).exec().getId(); dockerClient.tagImageCmd(imageId, privateRepoUrl, "latest").exec(); dockerClient.pushImageCmd(privateRepoUrl) .exec(new PushImageResultCallback()).awaitCompletion(); System.out.println("Creating web app " + appName + " in resource group " + rgName + "..."); WebApp app1 = azure.webApps() .define(appName) .withRegion(Region.US_WEST) .withNewResourceGroup(rgName) .withNewLinuxPlan(PricingTier.STANDARD_S1) .withPrivateRegistryImage(privateRepoUrl, azureRegistry.loginServerUrl()) .withCredentials(acrCredentials.username(), 
acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .withAppSetting("AZURE_KEYVAULT_URI", vault.vaultUri()) .withAppSetting("AZURE_KEYVAULT_CLIENT_ID", servicePrincipal.applicationId()) .withAppSetting("AZURE_KEYVAULT_CLIENT_KEY", password) .create(); System.out.println("Created web app " + app1.name()); Utils.print(app1); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp";
public static boolean runSample(AzureResourceManager azure, String clientId) throws IOException, InterruptedException { final Region region = Region.US_WEST; final String acrName = Utils.randomResourceName(azure, "acr", 20); final String appName = Utils.randomResourceName(azure, "webapp1-", 20); final String password = Utils.password(); final String rgName = Utils.randomResourceName(azure, "rg1NEMV_", 24); final String vaultName = Utils.randomResourceName(azure, "vault", 20); final String cosmosName = Utils.randomResourceName(azure, "cosmosdb", 20); String servicePrincipalClientId = clientId; try { System.out.println("Creating a CosmosDB..."); CosmosDBAccount cosmosDBAccount = azure.cosmosDBAccounts().define(cosmosName) .withRegion(region) .withNewResourceGroup(rgName) .withDataModelSql() .withStrongConsistency() .create(); System.out.println("Created CosmosDB"); Utils.print(cosmosDBAccount); ServicePrincipal servicePrincipal = azure.accessManagement().servicePrincipals() .define(appName) .withNewApplication("http: .definePasswordCredential("password") .withPasswordValue(password) .attach() .create(); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { servicePrincipalClientId = System.getenv("AZURE_CLIENT_ID"); if (servicePrincipalClientId == null || servicePrincipalClientId.isEmpty()) { String envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION_2"); if (envSecondaryServicePrincipal == null || !envSecondaryServicePrincipal.isEmpty() || !Files.exists(Paths.get(envSecondaryServicePrincipal))) { envSecondaryServicePrincipal = System.getenv("AZURE_AUTH_LOCATION"); } servicePrincipalClientId = Utils.getSecondaryServicePrincipalClientID(envSecondaryServicePrincipal); } } Vault vault = azure.vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipalClientId) .allowSecretAllPermissions() .attach() .defineAccessPolicy() 
.forServicePrincipal(servicePrincipal) .allowSecretPermissions(SecretPermissions.GET, SecretPermissions.LIST) .attach() .create(); vault.secrets().define("azure-documentdb-uri") .withValue(cosmosDBAccount.documentEndpoint()) .create(); vault.secrets().define("azure-documentdb-key") .withValue(cosmosDBAccount.listKeys().primaryMasterKey()) .create(); vault.secrets().define("azure-documentdb-database") .withValue("tododb") .create(); System.out.println("Creating an Azure Container Registry"); long t1 = System.currentTimeMillis(); Registry azureRegistry = azure.containerRegistries().define(acrName) .withRegion(region) .withNewResourceGroup(rgName) .withBasicSku() .withRegistryNameAsAdminUser() .create(); long t2 = System.currentTimeMillis(); System.out.println("Created Azure Container Registry: (took " + ((t2 - t1) / 1000) + " seconds) " + azureRegistry.id()); Utils.print(azureRegistry); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); DockerClient dockerClient = DockerUtils.createDockerClient(azure, rgName, region, azureRegistry.loginServerUrl(), acrCredentials.username(), acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)); String imageName = "tomcat:7.0-slim"; String privateRepoUrl = azureRegistry.loginServerUrl() + "/todoapp"; dockerClient.pullImageCmd(imageName) .withAuthConfig(new AuthConfig()) .exec(new PullImageResultCallback()) .awaitCompletion(); String imageId = dockerClient.inspectImageCmd(imageName).exec().getId(); dockerClient.tagImageCmd(imageId, privateRepoUrl, "latest").exec(); dockerClient.pushImageCmd(privateRepoUrl) .exec(new PushImageResultCallback()).awaitCompletion(); System.out.println("Creating web app " + appName + " in resource group " + rgName + "..."); WebApp app1 = azure.webApps() .define(appName) .withRegion(Region.US_WEST) .withNewResourceGroup(rgName) .withNewLinuxPlan(PricingTier.STANDARD_S1) .withPrivateRegistryImage(privateRepoUrl, azureRegistry.loginServerUrl()) .withCredentials(acrCredentials.username(), 
acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .withAppSetting("AZURE_KEYVAULT_URI", vault.vaultUri()) .withAppSetting("AZURE_KEYVAULT_CLIENT_ID", servicePrincipal.applicationId()) .withAppSetting("AZURE_KEYVAULT_CLIENT_KEY", password) .create(); System.out.println("Created web app " + app1.name()); Utils.print(app1); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azure.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
/**
 * Azure sample entry point: authenticates via the default Azure credential chain, selects the
 * default subscription, and delegates to {@code runSample} (defined elsewhere in this class)
 * with an empty client ID so the service principal is resolved from the environment.
 */
class ManageLinuxWebAppCosmosDbByMsi {
    /**
     * Main entry point.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();
            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());
            // Empty clientId: runSample falls back to environment-based credentials.
            runSample(azureResourceManager, "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
/**
 * Azure sample entry point: authenticates via the default Azure credential chain, selects the
 * default subscription, and delegates to {@code runSample} (defined elsewhere in this class)
 * with an empty client ID so the service principal is resolved from the environment.
 */
class ManageLinuxWebAppCosmosDbByMsi {
    /**
     * Main entry point.
     *
     * @param args the parameters (unused)
     */
    public static void main(String[] args) {
        try {
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();
            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());
            // Empty clientId: runSample falls back to environment-based credentials.
            runSample(azureResourceManager, "");
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }
}
Please don't use string concatenation in the logger call — pass the throwable as the last argument so the stack trace is preserved: ```suggestion logger.error("Error during pre populating item", throwable); ```
/**
 * Pre-creates documents in every container so the read/query workloads have data to operate
 * on; the created documents are cached in {@code docsToRead}, keyed by container id.
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // FIX: parameterized logging with the throwable as the last argument keeps
                    // the full stack trace; string concatenation only captured the message.
                    logger.error("Error during pre populating item", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and block until all of them complete.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // NOTE(review): doOnSuccess also fires (with null) when onErrorResume converts a
        // failure into Mono.empty(), so successCount counts every item; subtracting
        // failureCount yields the number actually created — confirm against reactor semantics.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
logger.error("Error during pre populating item "+throwable.getMessage());
/**
 * Pre-creates documents in every container so the read/query workloads have data to operate
 * on; the created documents are cached in {@code docsToRead}, keyed by container id.
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // FIX: the previous call passed throwable.getMessage() as a parameter to a
                    // message with no {} placeholder, so the detail was silently dropped and
                    // the stack trace lost. Passing the Throwable itself logs both.
                    logger.error("Error during pre populating item", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and block until all of them complete.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        // NOTE(review): doOnSuccess also fires (with null) when onErrorResume converts a
        // failure into Mono.empty(), so successCount counts every item; subtracting
        // failureCount yields the number actually created — confirm against reactor semantics.
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
// NOTE(review): duplicated context dump of the AsyncCtlWorkload CTL benchmark class,
// flattened onto a few long lines by a dataset-extraction step. Several string
// literals in run() (the metricsRegistry.meter(" ... calls) are truncated, so this
// copy is NOT compilable as-is; the code below is kept byte-identical for reference.
//
// What the class does (from the visible code): builds a CosmosAsyncClient from
// Configuration (direct or gateway connection mode), creates or looks up the test
// database and N containers (createDatabaseAndContainers), pre-populates documents
// (createPrePopulatedDocs), then run() drives a mixed read/write/query workload at
// the percentages parsed from readWriteQueryPct ("r,w,q", must sum to 100 —
// parsedReadWriteQueryPct throws IllegalArgumentException otherwise), reporting
// Dropwizard metrics via Graphite, CSV, or console (initializeReporter). shutdown()
// deletes any database/containers this run created and closes the client.
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
// NOTE(review): exact duplicate of the AsyncCtlWorkload context dump above in this
// dataset file (same extraction artifacts: flattened lines, truncated string
// literals in the metricsRegistry.meter(" ... calls inside run()). Not compilable
// as-is; kept byte-identical for reference.
//
// Summary (from the visible code): async Cosmos DB CTL workload driver — the
// constructor builds the client (direct/gateway), parses the "r,w,q" percentage
// split, creates database/containers and pre-populates documents; run() issues a
// read/write/query mix gated by a concurrency semaphore and reports Dropwizard
// metrics; shutdown() removes resources created by the run and closes the client.
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
done
/**
 * Pre-populates every benchmark container with {@code numberOfPreCreatedDocuments}
 * generated documents and stores the created items in {@code docsToRead}, keyed by
 * container id, so the read workload has known ids / partition-key values to target.
 *
 * <p>Creates run with a merge concurrency of 100 and block per container. Individual
 * create failures are counted and logged but do not abort pre-population.
 *
 * @param numberOfPreCreatedDocuments number of documents to create in each container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // FIX: parameterized SLF4J logging with the Throwable as the last
                    // argument, so the failing id and the full stack trace are logged
                    // (the original concatenated getMessage() eagerly and lost the trace).
                    logger.error("Error during pre populating item {}", uId, throwable);
                    return Mono.empty();
                })
                // NOTE: doOnSuccess sits after onErrorResume, so it also fires (with a
                // null value) for the empty Mono produced on error; that is why the
                // summary below reports successCount - failureCount.
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Block until this container's pre-population completes; the successfully
        // created docs become the read targets for the workload.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
successCount.get()-failureCount.get(), container.getId());
/**
 * Pre-populates every benchmark container with {@code numberOfPreCreatedDocuments}
 * generated documents and stores the created items in {@code docsToRead}, keyed by
 * container id, so the read workload has known ids / partition-key values to target.
 *
 * <p>Creates run with a merge concurrency of 100 and block per container. Individual
 * create failures are counted and logged but do not abort pre-population.
 *
 * @param numberOfPreCreatedDocuments number of documents to create in each container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> resp.getItem())
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // FIX: the original passed throwable.getMessage() without a "{}"
                    // placeholder, so SLF4J silently dropped the argument and the stack
                    // trace was never logged. Use a placeholder for the failing id and
                    // pass the Throwable as the last argument to get the full trace.
                    logger.error("Error during pre populating item {}", uId, throwable);
                    return Mono.empty();
                })
                // NOTE: doOnSuccess sits after onErrorResume, so it also fires (with a
                // null value) for the empty Mono produced on error; that is why the
                // summary below reports successCount - failureCount.
                .doOnSuccess(pojoizedJson -> successCount.incrementAndGet())
                .flux();
            createDocumentObservables.add(obs);
        }
        // Block until this container's pre-population completes; the successfully
        // created docs become the read targets for the workload.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            logger.info("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
// NOTE(review): third duplicate of the AsyncCtlWorkload context dump in this
// dataset file (same extraction artifacts: flattened lines, truncated string
// literals in the metricsRegistry.meter(" ... calls inside run()). Not compilable
// as-is; kept byte-identical for reference.
//
// Summary (from the visible code): async Cosmos DB CTL workload driver — the
// constructor builds the client (direct/gateway), parses the "r,w,q" percentage
// split, creates database/containers and pre-populates documents; run() issues a
// read/write/query mix gated by a concurrency semaphore and reports Dropwizard
// metrics; shutdown() removes resources created by the run and closes the client.
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
Done
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item "+throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get()-failureCount.get(), container.getId()); if(failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
logger.error("Error during pre populating item "+throwable.getMessage());
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
We are not throwing error in case of pre-populating failures, is this intentional ?
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
return Mono.empty();
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
Yes this is intentional , this was causing ctl failure if there was single failure on pre populated document create.
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
return Mono.empty();
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { for (CosmosAsyncContainer container : containers) { AtomicLong successCount = new AtomicLong(0); AtomicLong failureCount = new AtomicLong(0); ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>(); for (int i = 0; i < numberOfPreCreatedDocuments; i++) { String uId = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); Flux<PojoizedJson> obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = resp.getItem(); return x; }).onErrorResume(throwable -> { failureCount.incrementAndGet(); logger.error("Error during pre populating item ", throwable.getMessage()); return Mono.empty(); }).doOnSuccess(pojoizedJson -> { successCount.incrementAndGet(); }).flux(); createDocumentObservables.add(obs); } docsToRead.put(container.getId(), Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block()); logger.info("Finished pre-populating {} documents for container {}", successCount.get() - failureCount.get(), container.getId()); if (failureCount.get() > 0) { logger.info("Failed pre-populating {} documents for container {}", failureCount.get(), container.getId()); } } }
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
// ---------------------------------------------------------------------------
// AsyncCtlWorkload: CTL (continuous test loop) benchmark driver for the async
// Azure Cosmos DB client. The constructor builds a direct- or gateway-mode
// client from Configuration, provisions (or reuses) a database and N
// containers, pre-populates documents, and wires up Dropwizard metrics.
// run() executes a mixed read/write/query workload whose percentages come
// from Configuration.getReadWriteQueryPct() ("r,w,q", parsed by
// parsedReadWriteQueryPct and required to sum to 100), throttled by a
// concurrency semaphore, and reports metrics to Graphite, CSV, or the
// console (initializeReporter). shutdown() deletes the database if this run
// created it, otherwise only the containers this run created.
//
// NOTE(review): several string literals inside run() are truncated — e.g.
// `metricsRegistry.meter("` with no closing quote (the meter names appear to
// have been lost during extraction) — so this text does not compile as-is;
// restore the original meter names before building.
// NOTE(review): the error-percentage validation parses each value twice with
// Integer.valueOf; harmless, but worth tidying once the file compiles again.
// ---------------------------------------------------------------------------
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
Should this be logged at WARN rather than ERROR? A failed pre-create does no harm — the document is simply skipped and the run continues. Also note the SLF4J message has no `{}` placeholder, so the `throwable.getMessage()` argument is currently ignored; pass the `Throwable` itself (as the last argument) so the stack trace is preserved.
/**
 * Pre-populates each container with {@code numberOfPreCreatedDocuments} documents and
 * stores the created documents in {@code docsToRead} so the read workload has known
 * ids / partition-key values to fetch.
 *
 * <p>Creates are issued with up to 100 in flight per container and this method blocks
 * until all of them have settled. Individual create failures are logged and skipped
 * rather than aborting the run.
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> {
                    PojoizedJson x = resp.getItem();
                    return x;
                })
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // Fix: the original call was
                    //   logger.error("Error during pre populating item ", throwable.getMessage());
                    // The message has no "{}" placeholder, so SLF4J silently dropped the
                    // argument and the stack trace was lost. Pass the Throwable itself and
                    // log at WARN, since a failed pre-create is tolerated (the doc is skipped).
                    logger.warn("Error during pre populating item ", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> {
                    // NOTE(review): doOnSuccess also fires (with a null value) when
                    // onErrorResume completed the Mono empty, so successCount counts every
                    // settled create; successCount - failureCount below is therefore the
                    // number of documents actually created — TODO confirm against Reactor docs.
                    successCount.incrementAndGet();
                })
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and remember the created docs for the
        // read workload keyed by container id.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            // Failures are worth surfacing above INFO.
            logger.warn("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
logger.error("Error during pre populating item ", throwable.getMessage());
/**
 * Pre-populates each container with {@code numberOfPreCreatedDocuments} documents and
 * stores the created documents in {@code docsToRead} so the read workload has known
 * ids / partition-key values to fetch.
 *
 * <p>Creates are issued with up to 100 in flight per container and this method blocks
 * until all of them have settled. Individual create failures are logged and skipped
 * rather than aborting the run.
 *
 * @param numberOfPreCreatedDocuments number of documents to create per container
 */
private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) {
    for (CosmosAsyncContainer container : containers) {
        AtomicLong successCount = new AtomicLong(0);
        AtomicLong failureCount = new AtomicLong(0);
        ArrayList<Flux<PojoizedJson>> createDocumentObservables = new ArrayList<>();
        for (int i = 0; i < numberOfPreCreatedDocuments; i++) {
            String uId = UUID.randomUUID().toString();
            PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId,
                dataFieldValue,
                partitionKey,
                configuration.getDocumentDataFieldCount());
            Flux<PojoizedJson> obs = container.createItem(newDoc)
                .map(resp -> {
                    PojoizedJson x = resp.getItem();
                    return x;
                })
                .onErrorResume(throwable -> {
                    failureCount.incrementAndGet();
                    // Fix: the original call was
                    //   logger.error("Error during pre populating item ", throwable.getMessage());
                    // The message has no "{}" placeholder, so SLF4J silently dropped the
                    // argument and the stack trace was lost. Pass the Throwable itself and
                    // log at WARN, since a failed pre-create is tolerated (the doc is skipped).
                    logger.warn("Error during pre populating item ", throwable);
                    return Mono.empty();
                })
                .doOnSuccess(pojoizedJson -> {
                    // NOTE(review): doOnSuccess also fires (with a null value) when
                    // onErrorResume completed the Mono empty, so successCount counts every
                    // settled create; successCount - failureCount below is therefore the
                    // number of documents actually created — TODO confirm against Reactor docs.
                    successCount.incrementAndGet();
                })
                .flux();
            createDocumentObservables.add(obs);
        }
        // Run up to 100 creates concurrently and remember the created docs for the
        // read workload keyed by container id.
        docsToRead.put(container.getId(),
            Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block());
        logger.info("Finished pre-populating {} documents for container {}",
            successCount.get() - failureCount.get(), container.getId());
        if (failureCount.get() > 0) {
            // Failures are worth surfacing above INFO.
            logger.warn("Failed pre-populating {} documents for container {}",
                failureCount.get(), container.getId());
        }
    }
}
// ---------------------------------------------------------------------------
// AsyncCtlWorkload (duplicate copy): CTL (continuous test loop) benchmark
// driver for the async Azure Cosmos DB client. The constructor builds a
// direct- or gateway-mode client from Configuration, provisions (or reuses) a
// database and N containers, pre-populates documents, and wires up Dropwizard
// metrics. run() executes a mixed read/write/query workload whose percentages
// come from Configuration.getReadWriteQueryPct() ("r,w,q", must sum to 100);
// shutdown() deletes whatever this run created.
//
// NOTE(review): several string literals inside run() are truncated — e.g.
// `metricsRegistry.meter("` with no closing quote (meter names lost during
// extraction) — so this text does not compile as-is; restore the names before
// building. This block appears to be a verbatim duplicate of an earlier copy
// of the class in this file; deduplicate before committing.
// ---------------------------------------------------------------------------
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
// ---------------------------------------------------------------------------
// AsyncCtlWorkload (duplicate copy): CTL (continuous test loop) benchmark
// driver for the async Azure Cosmos DB client. The constructor builds a
// direct- or gateway-mode client from Configuration, provisions (or reuses) a
// database and N containers, pre-populates documents, and wires up Dropwizard
// metrics. run() executes a mixed read/write/query workload whose percentages
// come from Configuration.getReadWriteQueryPct() ("r,w,q", must sum to 100);
// shutdown() deletes whatever this run created.
//
// NOTE(review): several string literals inside run() are truncated — e.g.
// `metricsRegistry.meter("` with no closing quote (meter names lost during
// extraction) — so this text does not compile as-is; restore the names before
// building. This block appears to be a verbatim duplicate of an earlier copy
// of the class in this file; deduplicate before committing.
// ---------------------------------------------------------------------------
class AsyncCtlWorkload { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; private final MetricRegistry metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; private final Configuration configuration; private final Map<String, List<PojoizedJson>> docsToRead = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; private final Random random; private Timer readLatency; private Timer writeLatency; private Timer queryLatency; private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; private Meter writeSuccessMeter; private Meter writeFailureMeter; private Meter querySuccessMeter; private Meter queryFailureMeter; private CosmosAsyncDatabase cosmosAsyncDatabase; private List<CosmosAsyncContainer> containers = new ArrayList<>(); private List<String> containerToClearAfterTest = new ArrayList<>(); private boolean databaseCreated; private int readPct; private int writePct; private int queryPct; public AsyncCtlWorkload(Configuration cfg) { CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) .key(cfg.getMasterKey()) .consistencyLevel(cfg.getConsistencyLevel()) .contentResponseOnWriteEnabled(Boolean.parseBoolean(cfg.isContentResponseOnWriteEnabled())); if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); configuration = cfg; logger = 
LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryPct(configuration.getReadWriteQueryPct()); createDatabaseAndContainers(configuration); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); if (configuration.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } initializeReporter(cfg); MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } registry = configuration.getGraphiteMeterRegistry(); if (registry != null) { BridgeInternal.monitorTelemetry(registry); } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); logger.info("Deleted temporary collection {} created for this test", id); } } cosmosClient.close(); } private void performWorkload(BaseSubscriber<Object> documentSubscriber, OperationType type, long i) throws Exception { Flux<? 
extends Object> obs; CosmosAsyncContainer container = containers.get((int) i % containers.size()); if (type.equals(OperationType.Create)) { PojoizedJson data = BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, configuration.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query)) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); String sqlQuery = "Select top 100 * from c order by c._ts"; obs = container.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); } else { int index = random.nextInt(docsToRead.get(container.getId()).size()); RequestOptions options = new RequestOptions(); String partitionKeyValue = docsToRead.get(container.getId()).get(index).getId(); options.setPartitionKey(new PartitionKey(partitionKeyValue)); obs = container.readItem(docsToRead.get(container.getId()).get(index).getId(), new PartitionKey(partitionKeyValue), PojoizedJson.class) .flux(); } concurrencyControlSemaphore.acquire(); obs.subscribeOn(Schedulers.parallel()).subscribe(documentSubscriber); } public void run() throws Exception { readSuccessMeter = metricsRegistry.meter(" readFailureMeter = metricsRegistry.meter(" writeSuccessMeter = metricsRegistry.meter(" writeFailureMeter = metricsRegistry.meter(" querySuccessMeter = metricsRegistry.meter(" queryFailureMeter = metricsRegistry.meter(" readLatency = metricsRegistry.register("Read Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); writeLatency = metricsRegistry.register("Write Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; for (i = 0; 
BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber<Object> readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i); } else if (index < writeRange) { BenchmarkRequestSubscriber<Object> writeSubscriber = new BenchmarkRequestSubscriber<>(writeSuccessMeter, writeFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i); } else { BenchmarkRequestSubscriber<Object> querySubscriber = new BenchmarkRequestSubscriber<>(querySuccessMeter, queryFailureMeter, concurrencyControlSemaphore, count, configuration.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i); } } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } private void parsedReadWriteQueryPct(String readWriteQueryPct) { String[] readWriteQueryPctList = readWriteQueryPct.split(","); if (readWriteQueryPctList.length == 3) { try { if (Integer.valueOf(readWriteQueryPctList[0]) + Integer.valueOf(readWriteQueryPctList[1]) + Integer.valueOf(readWriteQueryPctList[2]) == 100) { readPct = Integer.valueOf(readWriteQueryPctList[0]); writePct = Integer.valueOf(readWriteQueryPctList[1]); queryPct = Integer.valueOf(readWriteQueryPctList[2]); } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } catch (NumberFormatException ex) { 
throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } else { throw new IllegalArgumentException(PERCENT_PARSING_ERROR + readWriteQueryPct); } } private void createDatabaseAndContainers(Configuration cfg) { try { cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); databaseCreated = true; } else { throw e; } } int numberOfCollection = cfg.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( this.configuration.getCollectionId() + "_" + i, Configuration.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); logger.info("Collection {} is created for this test", this.configuration.getCollectionId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { throw e; } } } } private void initializeReporter(Configuration configuration) { if (configuration.getGraphiteEndpoint() != null) { final Graphite graphite = new Graphite(new InetSocketAddress( configuration.getGraphiteEndpoint(), 
configuration.getGraphiteEndpointPort())); reporter = GraphiteReporter.forRegistry(metricsRegistry) .prefixedWith(configuration.getOperationType().name()) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .filter(MetricFilter.ALL) .build(graphite); } else if (configuration.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(configuration.getReportingDirectory()); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } } }
I added several other methods (`getTaskCountsResult()`, `getTaskSlotCounts()`) but kept this return type intact, since doing otherwise would break the API. I also added assertions for slot counts in `TasksTest:testGetTaskCounts()`.
/**
 * Gets the task counts for the specified job.
 * Task counts provide a count of the tasks by active, running or completed task state,
 * and a count of tasks which succeeded or failed. Tasks in the preparing state are
 * counted as running.
 *
 * @param jobId The ID of the job.
 * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
 * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
 * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
 * @return the TaskCounts object if successful.
 */
public TaskCounts getTaskCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
    // Combine the client-level behaviors with the per-call ones, then let them
    // customize the request options before the call is issued.
    BehaviorManager behaviorManager = new BehaviorManager(this.customBehaviors(), additionalBehaviors);
    JobGetTaskCountsOptions taskCountsOptions = new JobGetTaskCountsOptions();
    behaviorManager.applyRequestBehaviors(taskCountsOptions);

    // Issue the request through the protocol layer and surface only the
    // task-counts portion of the service response.
    return this.parentBatchClient.protocolLayer().jobs()
        .getTaskCounts(jobId, taskCountsOptions)
        .taskCounts();
}
return this.parentBatchClient.protocolLayer().jobs().getTaskCounts(jobId, options).taskCounts();
/**
 * Gets the task counts for the specified job.
 *
 * @param jobId The ID of the job.
 * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request.
 * @throws BatchErrorException Exception thrown when an error response is received from the Batch service.
 * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service.
 * @return the TaskCounts object if successful.
 */
public TaskCounts getTaskCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
    // Delegates to getTaskCountsResult (presumably the full service response
    // including slot counts — not visible here, confirm against its declaration)
    // and exposes only the task counts, preserving this method's historical
    // TaskCounts return type so existing callers are not broken.
    return getTaskCountsResult(jobId, additionalBehaviors).taskCounts();
}
class JobOperations implements IInheritedBehaviors { private Collection<BatchClientBehavior> customBehaviors; private final BatchClient parentBatchClient; JobOperations(BatchClient batchClient, Collection<BatchClientBehavior> inheritedBehaviors) { parentBatchClient = batchClient; InternalHelper.inheritClientBehaviorsAndSetPublicProperty(this, inheritedBehaviors); } /** * Gets a collection of behaviors that modify or customize requests to the Batch service. * * @return A collection of {@link BatchClientBehavior} instances. */ @Override public Collection<BatchClientBehavior> customBehaviors() { return customBehaviors; } /** * Sets a collection of behaviors that modify or customize requests to the Batch service. * * @param behaviors The collection of {@link BatchClientBehavior} instances. * @return The current instance. */ @Override public IInheritedBehaviors withCustomBehaviors(Collection<BatchClientBehavior> behaviors) { customBehaviors = behaviors; return this; } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics() throws BatchErrorException, IOException { return getAllJobsLifetimeStatistics(null); } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics(Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetAllLifetimeStatisticsOptions options = new JobGetAllLifetimeStatisticsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getAllLifetimeStatistics(options); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId) throws BatchErrorException, IOException { return getJob(jobId, null, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel) throws BatchErrorException, IOException { return getJob(jobId, detailLevel, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. 
* @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetOptions getJobOptions = new JobGetOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(getJobOptions); return this.parentBatchClient.protocolLayer().jobs().get(jobId, getJobOptions); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs() throws BatchErrorException, IOException { return listJobs(null, (Iterable<BatchClientBehavior>) null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(detailLevel, null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListOptions jobListOptions = new JobListOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().list(jobListOptions); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId) throws BatchErrorException, IOException { return listJobs(jobScheduleId, null, null); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(jobScheduleId, detailLevel, null); } /** * Lists the {@link CloudJob jobs} created under the specified jobSchedule. * * @param jobScheduleId The ID of jobSchedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListFromJobScheduleOptions jobListOptions = new JobListFromJobScheduleOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listFromJobSchedule(jobScheduleId, jobListOptions); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId) throws BatchErrorException, IOException { return listPreparationAndReleaseTaskStatus(jobId, null); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListPreparationAndReleaseTaskStatusOptions jobListOptions = new JobListPreparationAndReleaseTaskStatusOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listPreparationAndReleaseTaskStatus(jobId, jobListOptions); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { createJob(jobId, poolInfo, null); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddParameter param = new JobAddParameter() .withId(jobId) .withPoolInfo(poolInfo); createJob(param, additionalBehaviors); } /** * Adds a job to the Batch account. 
* * @param job The job to be added. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job) throws BatchErrorException, IOException { createJob(job, null); } /** * Adds a job to the Batch account. * * @param job The job to be added. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddOptions options = new JobAddOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().add(job, options); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId) throws BatchErrorException, IOException { deleteJob(jobId, null); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDeleteOptions options = new JobDeleteOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().delete(jobId, options); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId) throws BatchErrorException, IOException { terminateJob(jobId, null, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. This text will appear when you call {@link JobExecutionInformation * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason) throws BatchErrorException, IOException { terminateJob(jobId, terminateReason, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. 
This text will appear when you call {@link JobExecutionInformation * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobTerminateOptions options = new JobTerminateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().terminate(jobId, terminateReason, options); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void enableJob(String jobId) throws BatchErrorException, IOException { enableJob(jobId, null); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void enableJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobEnableOptions options = new JobEnableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().enable(jobId, options); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void disableJob(String jobId, DisableJobOption disableJobOption) throws BatchErrorException, IOException { disableJob(jobId, disableJobOption, null); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void disableJob(String jobId, DisableJobOption disableJobOption, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDisableOptions options = new JobDisableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().disable(jobId, disableJobOption, options); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { updateJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobUpdateOptions options = new JobUpdateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); JobUpdateParameter param = new JobUpdateParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); this.parentBatchClient.protocolLayer().jobs().update(jobId, param, options); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, null, null, null, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. 
* @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, OnAllTasksComplete onAllTasksComplete) throws BatchErrorException, IOException { patchJob(jobId, null, null, null, onAllTasksComplete, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchParameter param = new JobPatchParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); patchJob(jobId, param, additionalBehaviors); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The set of changes to be made to a job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, JobPatchParameter jobPatchParameter) throws BatchErrorException, IOException { patchJob(jobId, jobPatchParameter, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The parameter to update the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, JobPatchParameter jobPatchParameter, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchOptions options = new JobPatchOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().patch(jobId, jobPatchParameter, options); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ public TaskCounts getTaskCounts(String jobId) throws BatchErrorException, IOException { return getTaskCounts(jobId, null); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ }
class JobOperations implements IInheritedBehaviors { private Collection<BatchClientBehavior> customBehaviors; private final BatchClient parentBatchClient; JobOperations(BatchClient batchClient, Collection<BatchClientBehavior> inheritedBehaviors) { parentBatchClient = batchClient; InternalHelper.inheritClientBehaviorsAndSetPublicProperty(this, inheritedBehaviors); } /** * Gets a collection of behaviors that modify or customize requests to the Batch service. * * @return A collection of {@link BatchClientBehavior} instances. */ @Override public Collection<BatchClientBehavior> customBehaviors() { return customBehaviors; } /** * Sets a collection of behaviors that modify or customize requests to the Batch service. * * @param behaviors The collection of {@link BatchClientBehavior} instances. * @return The current instance. */ @Override public IInheritedBehaviors withCustomBehaviors(Collection<BatchClientBehavior> behaviors) { customBehaviors = behaviors; return this; } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics() throws BatchErrorException, IOException { return getAllJobsLifetimeStatistics(null); } /** * Gets lifetime summary statistics for all of the jobs in the current account. * * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return The aggregated job statistics. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public JobStatistics getAllJobsLifetimeStatistics(Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetAllLifetimeStatisticsOptions options = new JobGetAllLifetimeStatisticsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getAllLifetimeStatistics(options); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId) throws BatchErrorException, IOException { return getJob(jobId, null, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. * @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel) throws BatchErrorException, IOException { return getJob(jobId, detailLevel, null); } /** * Gets the specified {@link CloudJob}. * * @param jobId The ID of the job to get. 
* @param detailLevel A {@link DetailLevel} used for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A {@link CloudJob} containing information about the specified Azure Batch job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public CloudJob getJob(String jobId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobGetOptions getJobOptions = new JobGetOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(getJobOptions); return this.parentBatchClient.protocolLayer().jobs().get(jobId, getJobOptions); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs() throws BatchErrorException, IOException { return listJobs(null, (Iterable<BatchClientBehavior>) null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(detailLevel, null); } /** * Lists the {@link CloudJob jobs} in the Batch account. * * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListOptions jobListOptions = new JobListOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().list(jobListOptions); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId) throws BatchErrorException, IOException { return listJobs(jobScheduleId, null, null); } /** * Lists the {@link CloudJob jobs} created under the specified job schedule. * * @param jobScheduleId The ID of job schedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel) throws BatchErrorException, IOException { return listJobs(jobScheduleId, detailLevel, null); } /** * Lists the {@link CloudJob jobs} created under the specified jobSchedule. * * @param jobScheduleId The ID of jobSchedule. * @param detailLevel A {@link DetailLevel} used for filtering the list and for controlling which properties are retrieved from the service. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link CloudJob} objects. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<CloudJob> listJobs(String jobScheduleId, DetailLevel detailLevel, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListFromJobScheduleOptions jobListOptions = new JobListFromJobScheduleOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.appendDetailLevelToPerCallBehaviors(detailLevel); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listFromJobSchedule(jobScheduleId, jobListOptions); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId) throws BatchErrorException, IOException { return listPreparationAndReleaseTaskStatus(jobId, null); } /** * Lists the status of {@link JobPreparationTask} and {@link JobReleaseTask} tasks for the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @return A list of {@link JobPreparationAndReleaseTaskExecutionInformation} instances. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public PagedList<JobPreparationAndReleaseTaskExecutionInformation> listPreparationAndReleaseTaskStatus(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobListPreparationAndReleaseTaskStatusOptions jobListOptions = new JobListPreparationAndReleaseTaskStatusOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(jobListOptions); return this.parentBatchClient.protocolLayer().jobs().listPreparationAndReleaseTaskStatus(jobId, jobListOptions); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { createJob(jobId, poolInfo, null); } /** * Adds a job to the Batch account. * * @param jobId The ID of the job to be added. * @param poolInfo Specifies how a job should be assigned to a pool. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(String jobId, PoolInformation poolInfo, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddParameter param = new JobAddParameter() .withId(jobId) .withPoolInfo(poolInfo); createJob(param, additionalBehaviors); } /** * Adds a job to the Batch account. 
* * @param job The job to be added. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job) throws BatchErrorException, IOException { createJob(job, null); } /** * Adds a job to the Batch account. * * @param job The job to be added. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void createJob(JobAddParameter job, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobAddOptions options = new JobAddOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().add(job, options); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId) throws BatchErrorException, IOException { deleteJob(jobId, null); } /** * Deletes the specified job. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. 
* @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void deleteJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDeleteOptions options = new JobDeleteOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().delete(jobId, options); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId) throws BatchErrorException, IOException { terminateJob(jobId, null, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. This text will appear when you call {@link JobExecutionInformation * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason) throws BatchErrorException, IOException { terminateJob(jobId, terminateReason, null); } /** * Terminates the specified job, marking it as completed. * * @param jobId The ID of the job. * @param terminateReason The message to describe the reason the job has terminated. 
This text will appear when you call {@link JobExecutionInformation * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void terminateJob(String jobId, String terminateReason, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobTerminateOptions options = new JobTerminateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().terminate(jobId, terminateReason, options); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void enableJob(String jobId) throws BatchErrorException, IOException { enableJob(jobId, null); } /** * Enables the specified job, allowing new tasks to run. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void enableJob(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobEnableOptions options = new JobEnableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().enable(jobId, options); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void disableJob(String jobId, DisableJobOption disableJobOption) throws BatchErrorException, IOException { disableJob(jobId, disableJobOption, null); } /** * Disables the specified job. Disabled jobs do not run new tasks, but may be re-enabled later. * * @param jobId The ID of the job. * @param disableJobOption Specifies what to do with running tasks associated with the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void disableJob(String jobId, DisableJobOption disableJobOption, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobDisableOptions options = new JobDisableOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().disable(jobId, disableJobOption, options); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { updateJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method performs a full replace of all updatable properties of the job. For example, if the constraints parameter is null, then the Batch service removes the job's existing constraints and replaces them with the default constraints. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, it is set to the default value 0. * @param constraints The execution constraints for the job. If null, the constraints are cleared. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, it takes the default value of an empty list; in effect, any existing metadata is deleted. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void updateJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobUpdateOptions options = new JobUpdateOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); JobUpdateParameter param = new JobUpdateParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); this.parentBatchClient.protocolLayer().jobs().update(jobId, param, options); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, PoolInformation poolInfo) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, null, null, null, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. 
* @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, OnAllTasksComplete onAllTasksComplete) throws BatchErrorException, IOException { patchJob(jobId, null, null, null, onAllTasksComplete, null, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata) throws BatchErrorException, IOException { patchJob(jobId, poolInfo, priority, constraints, onAllTasksComplete, metadata, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param poolInfo The pool on which the Batch service runs the job's tasks. You may change the pool for a job only when the job is disabled. If you specify an autoPoolSpecification specification in the poolInfo, only the keepAlive property can be updated, and then only if the auto pool has a poolLifetimeOption of job. If null, the job continues to run on its current pool. * @param priority The priority of the job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If null, the priority of the job is left unchanged. * @param constraints The execution constraints for the job. If null, the existing execution constraints are left unchanged. * @param onAllTasksComplete Specifies an action the Batch service should take when all tasks in the job are in the completed state. * @param metadata A list of name-value pairs associated with the job as metadata. If null, the existing job metadata is left unchanged. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, PoolInformation poolInfo, Integer priority, JobConstraints constraints, OnAllTasksComplete onAllTasksComplete, List<MetadataItem> metadata, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchParameter param = new JobPatchParameter() .withPriority(priority) .withPoolInfo(poolInfo) .withConstraints(constraints) .withOnAllTasksComplete(onAllTasksComplete) .withMetadata(metadata); patchJob(jobId, param, additionalBehaviors); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The set of changes to be made to a job. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. */ public void patchJob(String jobId, JobPatchParameter jobPatchParameter) throws BatchErrorException, IOException { patchJob(jobId, jobPatchParameter, null); } /** * Updates the specified job. * This method only replaces the properties specified with non-null values. * * @param jobId The ID of the job. * @param jobPatchParameter The parameter to update the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException Exception thrown when an error response is received from the Batch service. * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
*/ public void patchJob(String jobId, JobPatchParameter jobPatchParameter, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { JobPatchOptions options = new JobPatchOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); this.parentBatchClient.protocolLayer().jobs().patch(jobId, jobPatchParameter, options); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ public TaskCounts getTaskCounts(String jobId) throws BatchErrorException, IOException { return getTaskCounts(jobId, null); } /** * Gets the task counts for the specified job. * Task counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCounts object if successful. */ /** * Gets the task slot counts for the specified job. * Task slot counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. 
Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskSlotCounts object if successful. */ public TaskSlotCounts getTaskSlotCounts(String jobId) throws BatchErrorException, IOException { return getTaskSlotCounts(jobId, null); } /** * Gets the task slot counts for the specified job. * Task slot counts provide a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskSlotCounts object if successful. */ public TaskSlotCounts getTaskSlotCounts(String jobId, Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException { return getTaskCountsResult(jobId, additionalBehaviors).taskSlotCounts(); } /** * Gets the task counts result for the specified job. * The result includes both task counts and task slot counts. Each counts object provides a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. 
* @return the TaskCountsResult object if successful. */ public TaskCountsResult getTaskCountsResult(String jobId) throws BatchErrorException, IOException { return getTaskCountsResult(jobId, null); } /** * Gets the task counts result for the specified job. * The result includes both task counts and task slot counts. Each counts object provides a count of the tasks by active, running or completed task state, and a count of tasks which succeeded or failed. Tasks in the preparing state are counted as running. * * @param jobId The ID of the job. * @param additionalBehaviors A collection of {@link BatchClientBehavior} instances that are applied to the Batch service request. * @throws BatchErrorException thrown if the request is rejected by server * @throws IOException Exception thrown when there is an error in serialization/deserialization of data sent to/received from the Batch service. * @return the TaskCountsResult object if successful. */ public TaskCountsResult getTaskCountsResult( String jobId, Iterable<BatchClientBehavior> additionalBehaviors ) throws BatchErrorException, IOException { JobGetTaskCountsOptions options = new JobGetTaskCountsOptions(); BehaviorManager bhMgr = new BehaviorManager(this.customBehaviors(), additionalBehaviors); bhMgr.applyRequestBehaviors(options); return this.parentBatchClient.protocolLayer().jobs().getTaskCounts(jobId, options); } }
Bug: the loop guard `i < pageSize++` increments `pageSize` on every comparison, so the bound grows in lockstep with `i` and the loop never terminates at the intended count. Use a fixed bound computed once before the loop.
/**
 * Verifies that listing relationships pages correctly when more relationships exist than fit in
 * a single page: creates enough relationships in each direction to force at least two pages,
 * then asserts that every non-terminal page is full and that more than one page is returned.
 *
 * @param httpClient The HTTP client to run the test with.
 * @param serviceVersion The Digital Twins service version to target.
 */
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) {
    DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion);

    // Unique model and twin ids so concurrent runs do not collide.
    String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    // One more relationship than a single page holds guarantees the listing spans at least two pages.
    // (Replaces the broken "i < pageSize++" guard, which grew the bound on every comparison and never terminated.)
    final int relationshipsPerDirection = RELATIONSHIP_PAGE_SIZE_DEFAULT + 1;

    // Track created ids per source twin so cleanup deletes each relationship from the twin that owns it.
    // (Previously all ids were deleted from floorTwinId, leaking the relationships created on roomTwinId.)
    List<String> createdOutgoingRelationshipIds = new ArrayList<>();
    List<String> createdIncomingRelationshipIds = new ArrayList<>();

    try {
        createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId);

        String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true);
        String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP);

        // Create floor -> room relationships (outgoing from the floor twin).
        for (int i = 0; i < relationshipsPerDirection; i++) {
            String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    floorTwinId,
                    relationshipId,
                    deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class),
                    BasicRelationship.class))
                // createRelationship emits the created relationship; consume it so completion can be verified.
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdOutgoingRelationshipIds.add(relationshipId);
        }

        // Create room -> floor relationships (incoming to the floor twin).
        for (int i = 0; i < relationshipsPerDirection; i++) {
            String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    roomTwinId,
                    relationshipId,
                    deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdIncomingRelationshipIds.add(relationshipId);
        }

        // Every non-terminal page must be full, and there must be more than one page.
        AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    outgoingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + outgoingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one.");

        // NOTE(review): this second pass re-lists floorTwinId's relationships, same as the first;
        // listIncomingRelationships may have been intended — behavior preserved from the original, confirm intent.
        AtomicInteger incomingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    incomingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + incomingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one.");
    } catch (Exception ex) {
        fail("Test run failed", ex);
    } finally {
        // Best-effort cleanup of every resource this test created.
        try {
            logger.info("Cleaning up test resources.");
            logger.info("Deleting created relationships.");
            createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block());
            createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block());
            logger.info("Deleting created digital twins.");
            asyncClient.deleteDigitalTwin(floorTwinId).block();
            asyncClient.deleteDigitalTwin(roomTwinId).block();
            asyncClient.deleteDigitalTwin(hvacTwinId).block();
            logger.info("Deleting created models.");
            asyncClient.deleteModel(floorModelId).block();
            asyncClient.deleteModel(roomModelId).block();
            asyncClient.deleteModel(hvacModelId).block();
        } catch (Exception ex) {
            fail("Test cleanup failed", ex);
        }
    }
}
for (int i = 0; i < pageSize++; i++) {
// Verifies relationship listing across multiple pages: creates BULK_RELATIONSHIP_COUNT relationships
// in each direction, asserts every non-terminal page is full and that more than one page is returned,
// then cleans up all created resources.
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) {
    DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion);

    // Unique model and twin ids so concurrent runs do not collide.
    String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    // Ids are tracked per source twin so cleanup deletes each relationship from the twin that owns it.
    List<String> createdOutgoingRelationshipIds = new ArrayList<>();
    List<String> createdIncomingRelationshipIds = new ArrayList<>();

    try {
        createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId);

        String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true);
        String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP);

        // Create floor -> room relationships (outgoing from the floor twin).
        // BULK_RELATIONSHIP_COUNT is declared in the test base; presumably it exceeds one page — TODO confirm.
        for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) {
            String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    floorTwinId,
                    relationshipId,
                    deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdOutgoingRelationshipIds.add(relationshipId);
        }

        // Create room -> floor relationships (incoming to the floor twin).
        for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) {
            String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    roomTwinId,
                    relationshipId,
                    deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdIncomingRelationshipIds.add(relationshipId);
        }

        // Page through the floor twin's relationships: every page with a continuation token must be full.
        AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    outgoingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + outgoingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1);

        // NOTE(review): this pass re-lists floorTwinId's relationships, same as the previous check;
        // listIncomingRelationships may have been intended for the "incoming" count — confirm intent.
        AtomicInteger incomingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    incomingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + incomingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1);
    } catch (Exception ex) {
        fail("Test run failed", ex);
    } finally {
        // Best-effort cleanup; failures here fail the test with a distinct message.
        try {
            logger.info("Cleaning up test resources.");
            logger.info("Deleting created relationships.");
            createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block());
            createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block());
            logger.info("Deleting created digital twins.");
            asyncClient.deleteDigitalTwin(floorTwinId).block();
            asyncClient.deleteDigitalTwin(roomTwinId).block();
            asyncClient.deleteDigitalTwin(hvacTwinId).block();
            logger.info("Deleting created models.");
            asyncClient.deleteModel(floorModelId).block();
            asyncClient.deleteModel(roomModelId).block();
            asyncClient.deleteModel(hvacModelId).block();
        } catch (Exception ex) {
            fail("Test cleanup failed", ex);
        }
    }
}
// Async-client tests for Digital Twins relationship APIs: create, duplicate-create conflict,
// update, get, list (outgoing/incoming/filtered), and delete, with full resource cleanup.
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase {
    private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class);

    // NOTE(review): the @MethodSource value below appears truncated in this extract (unterminated
    // string literal) — restore the full factory-method reference from the original file.
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.digitaltwins.core.TestHelper
    @Override
    public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException {
        DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion);

        // Unique model and twin ids so concurrent runs do not collide.
        String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
        String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
        String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
        String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
        String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
        String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

        try {
            createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId);

            // Relationship payloads between the floor, room, and hvac twins.
            String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true);
            String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP);
            String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP);
            String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP);
            List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false);

            // Create floor -> room (contains).
            // NOTE(review): AssertJ's as() only affects assertions chained AFTER it; here it follows
            // isEqualTo() and so has no effect on the failure message — confirm and reorder if desired.
            StepVerifier
                .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID,
                    deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(
                    basicRelationship -> {
                        assertThat(basicRelationship.getId())
                            .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)
                            .as("Created relationship from floor -> room");
                        logger.info("Created {} relationship between source = {} and target = {}",
                            basicRelationship.getId(),
                            basicRelationship.getSourceId(),
                            basicRelationship.getTargetId());
                    }
                )
                .verifyComplete();

            // Create floor -> hvac (cooledBy).
            StepVerifier
                .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID,
                    deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(
                    basicRelationship -> {
                        assertThat(basicRelationship.getId())
                            .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)
                            .as("Created relationship from floor -> hvac");
                        logger.info("Created {} relationship between source = {} and target = {}",
                            basicRelationship.getId(),
                            basicRelationship.getSourceId(),
                            basicRelationship.getTargetId());
                    }
                )
                .verifyComplete();

            // Create hvac -> floor (cools).
            StepVerifier
                .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID,
                    deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(
                    basicRelationship -> {
                        assertThat(basicRelationship.getId())
                            .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID)
                            .as("Created relationship from hvac -> floor");
                        logger.info("Created {} relationship between source = {} and target = {}",
                            basicRelationship.getId(),
                            basicRelationship.getSourceId(),
                            basicRelationship.getTargetId());
                    }
                )
                .verifyComplete();

            // Create room -> floor (containedIn).
            StepVerifier
                .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID,
                    deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(
                    basicRelationship -> {
                        assertThat(basicRelationship.getId())
                            .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)
                            .as("Created relationship from room -> floor");
                        logger.info("Created {} relationship between source = {} and target = {}",
                            basicRelationship.getId(),
                            basicRelationship.getSourceId(),
                            basicRelationship.getTargetId());
                    }
                )
                .verifyComplete();

            // Re-creating an existing relationship must be rejected with 412 (precondition failed).
            StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class))
                .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED));

            // Patch the floor -> room relationship property and expect 204 No Content.
            StepVerifier
                .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null))
                .assertNext(
                    voidDigitalTwinsResponse -> {
                        assertThat(voidDigitalTwinsResponse.getStatusCode())
                            .as("Updated relationship floor -> room")
                            .isEqualTo(HTTP_NO_CONTENT);
                        logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId);
                    }
                )
                .verifyComplete();

            // Retrieve the updated relationship by id.
            StepVerifier
                .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class))
                .assertNext(basicRelationship -> {
                    assertThat(basicRelationship.getId())
                        .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)
                        .as("Retrieved floor -> room relationship");
                    logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId());
                })
                .verifyComplete();

            // The floor twin has exactly two incoming relationships: from the room and from the hvac.
            List<String> incomingRelationshipsSourceIds = new ArrayList<>();
            StepVerifier
                .create(asyncClient.listIncomingRelationships(floorTwinId, null))
                .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId()))
                .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId()))
                .expectComplete()
                .verify();
            assertThat(incomingRelationshipsSourceIds)
                .as("Floor has incoming relationships from room and hvac")
                .containsExactlyInAnyOrder(roomTwinId, hvacTwinId);
            logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray()));

            // The floor twin has exactly two outgoing relationships: to the room and to the hvac.
            List<String> relationshipsTargetIds = new ArrayList<>();
            StepVerifier
                .create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class))
                .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId()))
                .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId()))
                .expectComplete()
                .verify();
            assertThat(relationshipsTargetIds)
                .as("Floor has a relationship to room and hvac")
                .containsExactlyInAnyOrder(roomTwinId, hvacTwinId);
            logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray()));

            // Filtering by relationship name yields only the room's single containedIn relationship.
            StepVerifier
                .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null))
                .assertNext(basicRelationship -> {
                    assertThat(basicRelationship.getName())
                        .isEqualTo(CONTAINED_IN_RELATIONSHIP)
                        .as("Room has only one containedIn relationship to floor");
                    assertThat(basicRelationship.getTargetId())
                        .isEqualTo(floorTwinId)
                        .as("Room has only one containedIn relationship to floor");
                    logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId);
                })
                .expectComplete()
                .verify();

            // Delete every relationship created above.
            StepVerifier
                .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID))
                .verifyComplete();
            logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId);

            StepVerifier
                .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID))
                .verifyComplete();
            logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId);

            StepVerifier
                .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID))
                .verifyComplete();
            logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId);

            StepVerifier
                .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID))
                .verifyComplete();
            logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, hvacTwinId);

            // Getting a deleted relationship must fail with 404.
            StepVerifier
                .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class))
                .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND));
        } finally {
            // Best-effort cleanup: enumerate any remaining relationships, then delete twins and models.
            try {
                logger.info("Cleaning up test resources.");
                logger.info("Deleting created relationships.");
                List<BasicRelationship> relationships = new ArrayList<>();
                asyncClient.listRelationships(floorTwinId, BasicRelationship.class)
                    .doOnNext(relationships::add)
                    .blockLast();
                asyncClient.listRelationships(roomTwinId, BasicRelationship.class)
                    .doOnNext(relationships::add)
                    .blockLast();
                asyncClient.listRelationships(hvacTwinId, BasicRelationship.class)
                    .doOnNext(relationships::add)
                    .blockLast();
                relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block());
                logger.info("Deleting created digital twins.");
                asyncClient.deleteDigitalTwin(floorTwinId).block();
                asyncClient.deleteDigitalTwin(roomTwinId).block();
                asyncClient.deleteDigitalTwin(hvacTwinId).block();
                logger.info("Deleting created models.");
                asyncClient.deleteModel(floorModelId).block();
                asyncClient.deleteModel(roomModelId).block();
                asyncClient.deleteModel(hvacModelId).block();
            } catch (Exception ex) {
                fail("Test cleanup failed", ex);
            }
        }
    }

    // NOTE(review): the test annotations and an @Override below sit on a private helper — this looks
    // like extraction mangling (the @MethodSource string is also unterminated); verify against the
    // original file. The helper creates the three models and one twin of each model, asserting each
    // async create completes.
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.digitaltwins.core.TestHelper
    @Override
    private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException {
        // Create all three models in a single batch call.
        createModelsRunner(
            floorModelId,
            roomModelId,
            hvacModelId,
            modelsList -> StepVerifier
                .create(asyncClient.createModels(modelsList))
                .assertNext(createResponseList -> logger.info("Created models successfully"))
                .verifyComplete());
        // Create one twin per model.
        createFloorTwinRunner(
            floorTwinId,
            floorModelId,
            (twinId, twin) -> StepVerifier
                .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class))
                .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId()))
                .verifyComplete());
        createRoomTwinRunner(
            roomTwinId,
            roomModelId,
            (twinId, twin) -> StepVerifier
                .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class))
                .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId()))
                .verifyComplete());
        createHvacTwinRunner(
            hvacTwinId,
            hvacModelId,
            (twinId, twin) -> StepVerifier
                .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class))
                .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId()))
                .verifyComplete());
    }
}
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
Can this be something more like: ```java assertThat("Number of pages must be more than 1", outgoingRelationshipsPagecount.get(), greaterThan(1)); ``` In .NET at least, doing a comparison rather than asserting true or false gets you better test failure output.
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); int pageSize = 5; String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); final int MAX_WAIT_TIME_ASYNC_OPERATIONS_IN_SECONDS = 30; List<String> createdRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < pageSize++; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } for (int i = 0; i < pageSize + 1; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, 
BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); 
asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one.");
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); List<String> createdOutgoingRelationshipIds = new ArrayList<>(); List<String> createdIncomingRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdOutgoingRelationshipIds.add(relationshipId); } for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( 
asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdIncomingRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); 
createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
weird spacing issues in this statement
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsClient client = getClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); List<String> createdRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(client, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0 ; i< BULK_RELATIONSHIP_COUNT ; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); client.createRelationship(floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class); createdRelationshipIds.add(relationshipId); } for (int i = 0 ; i< BULK_RELATIONSHIP_COUNT ; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); client.createRelationship(roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class); createdRelationshipIds.add(relationshipId); } PagedIterable<BasicRelationship> 
listOutgoingRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); listOutgoingRelationships.iterableByPage().forEach(relationshipsPagedResponse -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship data: relationshipsPagedResponse.getValue()) { logger.info(data.getId()); } if (relationshipsPagedResponse.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, relationshipsPagedResponse.getValue().size(), "Unexpected page size for a non-terminal page"); } }); assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); PagedIterable<BasicRelationship> listIncomingRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); listIncomingRelationships.iterableByPage().forEach(relationshipsPagedResponse -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship data: relationshipsPagedResponse.getValue()) { logger.info(data.getId()); } if (relationshipsPagedResponse.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, relationshipsPagedResponse.getValue().size(), "Unexpected page size for a non-terminal page"); } }); assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdRelationshipIds.forEach(relationshipId -> client.deleteRelationship(floorTwinId, relationshipId)); logger.info("Deleting created digital twins."); client.deleteDigitalTwin(floorTwinId); client.deleteDigitalTwin(roomTwinId); client.deleteDigitalTwin(hvacTwinId); logger.info("Deleting created models."); 
client.deleteModel(floorModelId); client.deleteModel(roomModelId); client.deleteModel(hvacModelId); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
for (int i = 0 ; i< BULK_RELATIONSHIP_COUNT ; i++) {
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsClient client = getClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); List<String> createdOutgoingRelationshipIds = new ArrayList<>(); List<String> createdIncomingRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(client, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); client.createRelationship(floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class); createdOutgoingRelationshipIds.add(relationshipId); } for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); client.createRelationship(roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class); 
createdIncomingRelationshipIds.add(relationshipId); } PagedIterable<BasicRelationship> listOutgoingRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); listOutgoingRelationships.iterableByPage().forEach(relationshipsPagedResponse -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship data: relationshipsPagedResponse.getValue()) { logger.info(data.getId()); } if (relationshipsPagedResponse.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, relationshipsPagedResponse.getValue().size(), "Unexpected page size for a non-terminal page"); } }); assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1); PagedIterable<BasicRelationship> listIncomingRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); listIncomingRelationships.iterableByPage().forEach(relationshipsPagedResponse -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship data: relationshipsPagedResponse.getValue()) { logger.info(data.getId()); } if (relationshipsPagedResponse.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, relationshipsPagedResponse.getValue().size(), "Unexpected page size for a non-terminal page"); } }); assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdOutgoingRelationshipIds.forEach(relationshipId -> client.deleteRelationship(floorTwinId, relationshipId)); createdIncomingRelationshipIds.forEach(relationshipId -> client.deleteRelationship(roomTwinId, relationshipId)); logger.info("Deleting created digital twins."); 
client.deleteDigitalTwin(floorTwinId); client.deleteDigitalTwin(roomTwinId); client.deleteDigitalTwin(hvacTwinId); logger.info("Deleting created models."); client.deleteModel(floorModelId); client.deleteModel(roomModelId); client.deleteModel(hvacModelId); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class DigitalTwinsRelationshipTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsClient client = getClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); try { createModelsAndTwins(client, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); BasicRelationship floorRoomRelationship = client.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class); 
assertThat(floorRoomRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", floorRoomRelationship.getId(), floorRoomRelationship.getSourceId(), floorRoomRelationship.getTargetId()); BasicRelationship floorHvacRelationship = client.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class); assertThat(floorHvacRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", floorHvacRelationship.getId(), floorHvacRelationship.getSourceId(), floorHvacRelationship.getTargetId()); BasicRelationship hvacFloorRelationship = client.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class); assertThat(hvacFloorRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", hvacFloorRelationship.getId(), hvacFloorRelationship.getSourceId(), hvacFloorRelationship.getTargetId()); BasicRelationship roomFloorRelationship = client.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class); assertThat(roomFloorRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", roomFloorRelationship.getId(), roomFloorRelationship.getSourceId(), roomFloorRelationship.getTargetId()); assertRestException( () -> 
client.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class), HTTP_PRECON_FAILED ); DigitalTwinsResponse<Void> updateRelationshipResponse = client.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null, Context.NONE); assertThat(updateRelationshipResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); BasicRelationship floorContainsRoomRelationship = client.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class); assertThat(floorContainsRoomRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", floorContainsRoomRelationship.getId(), floorContainsRoomRelationship.getSourceId()); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); PagedIterable<IncomingRelationship> listIncomingRelationships = client.listIncomingRelationships(floorTwinId, new DigitalTwinsListIncomingRelationshipsOptions(), Context.NONE); listIncomingRelationships.forEach(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); PagedIterable<BasicRelationship> listRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); listRelationships.forEach(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())); 
assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); List<String> containedInRelationshipsTargetIds = new ArrayList<>(); PagedIterable<BasicRelationship> listContainedInRelationship = client.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null, Context.NONE); listContainedInRelationship.forEach(basicRelationship -> { containedInRelationshipsTargetIds.add(basicRelationship.getTargetId()); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }); assertThat(containedInRelationshipsTargetIds.size()) .as("Room has only one containedIn relationship to floor") .isEqualTo(1); client.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); client.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); client.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); client.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, hvacTwinId); assertRestException( () -> client.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class), HTTP_NOT_FOUND ); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); client.listRelationships(floorTwinId, BasicRelationship.class) .iterableByPage() 
.forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); client.listRelationships(roomTwinId, BasicRelationship.class) .iterableByPage() .forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); client.listRelationships(hvacTwinId, BasicRelationship.class) .iterableByPage() .forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); relationships.forEach(basicRelationship -> client.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId())); logger.info("Deleting created digital twins."); client.deleteDigitalTwin(floorTwinId); client.deleteDigitalTwin(roomTwinId); client.deleteDigitalTwin(hvacTwinId); logger.info("Deleting created models."); client.deleteModel(floorModelId); client.deleteModel(roomModelId); client.deleteModel(hvacModelId); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsClient client, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> { Iterable<DigitalTwinsModelData> createdModels = client.createModels(modelsList); logger.info("Created models successfully"); } ); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); 
createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); } }
class DigitalTwinsRelationshipTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsClient client = getClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); try { createModelsAndTwins(client, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); BasicRelationship floorRoomRelationship = client.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class); 
assertThat(floorRoomRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", floorRoomRelationship.getId(), floorRoomRelationship.getSourceId(), floorRoomRelationship.getTargetId()); BasicRelationship floorHvacRelationship = client.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class); assertThat(floorHvacRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", floorHvacRelationship.getId(), floorHvacRelationship.getSourceId(), floorHvacRelationship.getTargetId()); BasicRelationship hvacFloorRelationship = client.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class); assertThat(hvacFloorRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", hvacFloorRelationship.getId(), hvacFloorRelationship.getSourceId(), hvacFloorRelationship.getTargetId()); BasicRelationship roomFloorRelationship = client.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class); assertThat(roomFloorRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", roomFloorRelationship.getId(), roomFloorRelationship.getSourceId(), roomFloorRelationship.getTargetId()); assertRestException( () -> 
client.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class), HTTP_PRECON_FAILED ); DigitalTwinsResponse<Void> updateRelationshipResponse = client.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null, Context.NONE); assertThat(updateRelationshipResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); BasicRelationship floorContainsRoomRelationship = client.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class); assertThat(floorContainsRoomRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", floorContainsRoomRelationship.getId(), floorContainsRoomRelationship.getSourceId()); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); PagedIterable<IncomingRelationship> listIncomingRelationships = client.listIncomingRelationships(floorTwinId, new DigitalTwinsListIncomingRelationshipsOptions(), Context.NONE); listIncomingRelationships.forEach(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); PagedIterable<BasicRelationship> listRelationships = client.listRelationships(floorTwinId, BasicRelationship.class); listRelationships.forEach(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())); 
assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); List<String> containedInRelationshipsTargetIds = new ArrayList<>(); PagedIterable<BasicRelationship> listContainedInRelationship = client.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null, Context.NONE); listContainedInRelationship.forEach(basicRelationship -> { containedInRelationshipsTargetIds.add(basicRelationship.getTargetId()); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }); assertThat(containedInRelationshipsTargetIds.size()) .as("Room has only one containedIn relationship to floor") .isEqualTo(1); client.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); client.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); client.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); client.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, hvacTwinId); assertRestException( () -> client.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class), HTTP_NOT_FOUND ); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); client.listRelationships(floorTwinId, BasicRelationship.class) .iterableByPage() 
.forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); client.listRelationships(roomTwinId, BasicRelationship.class) .iterableByPage() .forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); client.listRelationships(hvacTwinId, BasicRelationship.class) .iterableByPage() .forEach(basicRelationshipPagedResponse -> relationships.addAll(basicRelationshipPagedResponse.getValue())); relationships.forEach(basicRelationship -> client.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId())); logger.info("Deleting created digital twins."); client.deleteDigitalTwin(floorTwinId); client.deleteDigitalTwin(roomTwinId); client.deleteDigitalTwin(hvacTwinId); logger.info("Deleting created models."); client.deleteModel(floorModelId); client.deleteModel(roomModelId); client.deleteModel(hvacModelId); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsClient client, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> { Iterable<DigitalTwinsModelData> createdModels = client.createModels(modelsList); logger.info("Created models successfully"); } ); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); 
createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> { BasicDigitalTwin createdTwin = client.createDigitalTwin(twinId, twin, BasicDigitalTwin.class); logger.info("Created {} twin successfully", createdTwin.getId()); } ); } }
hmm, I expected this to be the `BULK_RELATIONSHIP_COUNT` instead of pageSize
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); int pageSize = 5; String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); final int MAX_WAIT_TIME_ASYNC_OPERATIONS_IN_SECONDS = 30; List<String> createdRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < pageSize++; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } for (int i = 0; i < pageSize + 1; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, 
BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); 
asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
for (int i = 0; i < pageSize + 1; i++) {
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); List<String> createdOutgoingRelationshipIds = new ArrayList<>(); List<String> createdIncomingRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdOutgoingRelationshipIds.add(relationshipId); } for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( 
asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdIncomingRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); 
createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
I think in this case you are only creating 5 relationships no? that's smaller than the default page size of 10
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); int pageSize = 5; String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); final int MAX_WAIT_TIME_ASYNC_OPERATIONS_IN_SECONDS = 30; List<String> createdRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < pageSize++; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } for (int i = 0; i < pageSize + 1; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, 
BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); 
asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
for (int i = 0; i < pageSize + 1; i++) {
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); List<String> createdOutgoingRelationshipIds = new ArrayList<>(); List<String> createdIncomingRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdOutgoingRelationshipIds.add(relationshipId); } for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( 
asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdIncomingRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); 
createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
Good catch — that's not right: the loop condition `i < pageSize++` increments `pageSize` on every check, so the bound grows together with `i` and the loop never terminates. Use `i < pageSize + 1` (or a dedicated count constant) so the loop runs a fixed number of times without mutating `pageSize`.
/**
 * Verifies that relationship list operations page correctly when a twin has more relationships
 * than fit on a single page: creates {@code pageSize + 1} "contains" relationships on the floor
 * twin and {@code pageSize + 1} "containedIn" relationships on the room twin, then asserts that
 * listing the floor twin's relationships yields more than one page, with every non-terminal page
 * holding {@code RELATIONSHIP_PAGE_SIZE_DEFAULT} items.
 *
 * @param httpClient     the HTTP client the async client is built with
 * @param serviceVersion the Digital Twins service version to target
 */
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) {
    DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion);

    int pageSize = 5;

    String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    // FIX: track created relationships per source twin so cleanup deletes each relationship from
    // the twin it was actually created on. Previously a single list was deleted entirely against
    // floorTwinId, leaking the relationships created on roomTwinId.
    List<String> createdFloorRelationshipIds = new ArrayList<>();
    List<String> createdRoomRelationshipIds = new ArrayList<>();

    try {
        createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId);

        String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true);
        String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP);

        // FIX: the condition was 'i < pageSize++', which incremented the bound on every
        // comparison so the loop never terminated (and corrupted pageSize for later use).
        for (int i = 0; i < pageSize + 1; i++) {
            String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    floorTwinId,
                    relationshipId,
                    deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class),
                    BasicRelationship.class))
                // FIX: createRelationship emits the created relationship; the previous bare
                // verifyComplete() would fail on the unexpected onNext signal.
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdFloorRelationshipIds.add(relationshipId);
        }

        for (int i = 0; i < pageSize + 1; i++) {
            String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    roomTwinId,
                    relationshipId,
                    deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdRoomRelationshipIds.add(relationshipId);
        }

        // Outgoing relationships of the floor twin must span more than one page; every page that
        // carries a continuation token must be exactly the service default page size.
        AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    outgoingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + outgoingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one.");

        // NOTE(review): this block is labeled "incoming" but lists the floor twin's *outgoing*
        // relationships again; it presumably should call listIncomingRelationships — confirm
        // against the service behavior before changing.
        AtomicInteger incomingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    incomingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + incomingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one.");
    } catch (Exception ex) {
        fail("Test run failed", ex);
    } finally {
        // Best-effort cleanup: relationships first (they block twin deletion), then twins, then
        // models. Any cleanup failure is surfaced as a test failure rather than swallowed.
        try {
            logger.info("Cleaning up test resources.");

            logger.info("Deleting created relationships.");
            createdFloorRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block());
            createdRoomRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block());

            logger.info("Deleting created digital twins.");
            asyncClient.deleteDigitalTwin(floorTwinId).block();
            asyncClient.deleteDigitalTwin(roomTwinId).block();
            asyncClient.deleteDigitalTwin(hvacTwinId).block();

            logger.info("Deleting created models.");
            asyncClient.deleteModel(floorModelId).block();
            asyncClient.deleteModel(roomModelId).block();
            asyncClient.deleteModel(hvacModelId).block();
        } catch (Exception ex) {
            fail("Test cleanup failed", ex);
        }
    }
}
// FIX: 'i < pageSize++' incremented the bound on every comparison, so the loop never
// terminated (and left pageSize mutated). Use a non-mutating bound instead.
for (int i = 0; i < pageSize + 1; i++) {
/**
 * Verifies that relationship list operations page correctly when a twin has more relationships
 * than fit on a single page.
 *
 * <p>Creates {@code BULK_RELATIONSHIP_COUNT} "contains" relationships sourced at the floor twin
 * and {@code BULK_RELATIONSHIP_COUNT} "containedIn" relationships sourced at the room twin, then
 * asserts that listing the floor twin's relationships yields more than one page, with every page
 * that carries a continuation token holding exactly {@code RELATIONSHIP_PAGE_SIZE_DEFAULT}
 * items.</p>
 *
 * @param httpClient     the HTTP client the async client is built with
 * @param serviceVersion the Digital Twins service version to target
 */
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) {
    DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion);

    // Unique model and twin ids so parallel/recorded test runs do not collide.
    String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);
    String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator);

    // Created relationship ids are tracked per source twin so cleanup can delete each one from
    // the twin it was created on.
    List<String> createdOutgoingRelationshipIds = new ArrayList<>();
    List<String> createdIncomingRelationshipIds = new ArrayList<>();

    try {
        createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId);

        String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true);
        String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP);

        // Bulk-create outgoing relationships (floor -> room), each with a random unique suffix.
        for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) {
            String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    floorTwinId,
                    relationshipId,
                    deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdOutgoingRelationshipIds.add(relationshipId);
        }

        // Bulk-create relationships sourced at the room twin (room -> floor).
        for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) {
            String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid();
            StepVerifier.create(
                asyncClient.createRelationship(
                    roomTwinId,
                    relationshipId,
                    deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class),
                    BasicRelationship.class))
                .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId))
                .verifyComplete();
            createdIncomingRelationshipIds.add(relationshipId);
        }

        // Page through the floor twin's relationships, counting pages; any non-terminal page
        // (continuation token present) must be exactly the default page size.
        AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    outgoingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + outgoingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1);

        // NOTE(review): this second pass is named "incoming" but lists the floor twin's
        // *outgoing* relationships again via listRelationships — presumably it was meant to use
        // listIncomingRelationships; confirm before changing.
        AtomicInteger incomingRelationshipsPageCount = new AtomicInteger();
        StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage())
            .thenConsumeWhile(
                page -> {
                    incomingRelationshipsPageCount.getAndIncrement();
                    logger.info("content for this page " + incomingRelationshipsPageCount);
                    for (BasicRelationship relationship : page.getValue()) {
                        logger.info(relationship.getId());
                    }
                    if (page.getContinuationToken() != null) {
                        assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page");
                    }
                    return true;
                })
            .verifyComplete();
        assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1);
    } catch (Exception ex) {
        fail("Test run failed", ex);
    } finally {
        // Best-effort cleanup: relationships first (deleted against the twin each was created
        // on), then twins, then models; cleanup failures are surfaced, not swallowed.
        try {
            logger.info("Cleaning up test resources.");

            logger.info("Deleting created relationships.");
            createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block());
            createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block());

            logger.info("Deleting created digital twins.");
            asyncClient.deleteDigitalTwin(floorTwinId).block();
            asyncClient.deleteDigitalTwin(roomTwinId).block();
            asyncClient.deleteDigitalTwin(hvacTwinId).block();

            logger.info("Deleting created models.");
            asyncClient.deleteModel(floorModelId).block();
            asyncClient.deleteModel(roomModelId).block();
            asyncClient.deleteModel(hvacModelId).block();
        } catch (Exception ex) {
            fail("Test cleanup failed", ex);
        }
    }
}
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
Correct, I made a small mistake here. I was going to write this test the same way as the other pagination tests, but then I discovered we can't choose the page size here. I must have forgotten to revert this part after I made that discovery
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); int pageSize = 5; String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); final int MAX_WAIT_TIME_ASYNC_OPERATIONS_IN_SECONDS = 30; List<String> createdRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < pageSize++; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } for (int i = 0; i < pageSize + 1; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, 
BasicRelationship.class), BasicRelationship.class)).verifyComplete(); createdRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(outgoingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertTrue(incomingRelationshipsPageCount.get() > 1, "Number of pages must be more than one."); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); 
asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
for (int i = 0; i < pageSize + 1; i++) {
public void relationshipListOperationWithMultiplePages(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); List<String> createdOutgoingRelationshipIds = new ArrayList<>(); List<String> createdIncomingRelationshipIds = new ArrayList<>(); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String roomContainedInFloorPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( asyncClient.createRelationship( floorTwinId, relationshipId, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdOutgoingRelationshipIds.add(relationshipId); } for (int i = 0; i < BULK_RELATIONSHIP_COUNT; i++) { String relationshipId = ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID + this.testResourceNamer.randomUuid(); StepVerifier.create( 
asyncClient.createRelationship( roomTwinId, relationshipId, deserializeJsonString(roomContainedInFloorPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext(response -> logger.info("Created relationship with Id {}", relationshipId)) .verifyComplete(); createdIncomingRelationshipIds.add(relationshipId); } AtomicInteger outgoingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { outgoingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + outgoingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(outgoingRelationshipsPageCount.get()).isGreaterThan(1); AtomicInteger incomingRelationshipsPageCount = new AtomicInteger(); StepVerifier.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class).byPage()) .thenConsumeWhile( page -> { incomingRelationshipsPageCount.getAndIncrement(); logger.info("content for this page " + incomingRelationshipsPageCount); for (BasicRelationship relationship : page.getValue()) { logger.info(relationship.getId()); } if (page.getContinuationToken() != null) { assertEquals(RELATIONSHIP_PAGE_SIZE_DEFAULT, page.getValue().size(), "Unexpected page size for a non-terminal page"); } return true; }) .verifyComplete(); assertThat(incomingRelationshipsPageCount.get()).isGreaterThan(1); } catch (Exception ex) { fail("Test run failed", ex); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); createdOutgoingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(floorTwinId, relationshipId).block()); 
createdIncomingRelationshipIds.forEach(relationshipId -> asyncClient.deleteRelationship(roomTwinId, relationshipId).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
class DigitalTwinsRelationshipAsyncTest extends DigitalTwinsRelationshipTestBase { private final ClientLogger logger = new ClientLogger(DigitalTwinsRelationshipAsyncTest.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void relationshipLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String floorModelId = getUniqueModelId(FLOOR_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomModelId = getUniqueModelId(ROOM_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacModelId = getUniqueModelId(HVAC_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String floorTwinId = getUniqueDigitalTwinId(FLOOR_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomTwinId = getUniqueDigitalTwinId(ROOM_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String hvacTwinId = getUniqueDigitalTwinId(HVAC_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); try { createModelsAndTwins(asyncClient, floorModelId, roomModelId, hvacModelId, floorTwinId, roomTwinId, hvacTwinId); String floorContainsRoomPayload = getRelationshipWithPropertyPayload(roomTwinId, CONTAINS_RELATIONSHIP, "isAccessRestricted", true); String floorTwinCoolsRelationshipPayload = getRelationshipPayload(floorTwinId, COOLS_RELATIONSHIP); String floorTwinContainedInRelationshipPayload = getRelationshipPayload(floorTwinId, CONTAINED_IN_RELATIONSHIP); String floorCooledByHvacPayload = getRelationshipPayload(hvacTwinId, COOLED_BY_RELATIONSHIP); List<Object> floorContainsRoomUpdatePayload = getRelationshipUpdatePayload("/isAccessRestricted", false); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, deserializeJsonString(floorContainsRoomPayload, BasicRelationship.class), 
BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Created relationship from floor -> room"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, deserializeJsonString(floorCooledByHvacPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID) .as("Created relationship from floor -> hvac"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinCoolsRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(HVAC_COOLS_FLOOR_RELATIONSHIP_ID) .as("Created relationship from hvac -> floor"); logger.info("Created {} relationship between source = {} and target = {}", basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier .create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, deserializeJsonString(floorTwinContainedInRelationshipPayload, BasicRelationship.class), BasicRelationship.class)) .assertNext( basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID) .as("Created relationship from room -> floor"); logger.info("Created {} relationship between source = {} and target = {}", 
basicRelationship.getId(), basicRelationship.getSourceId(), basicRelationship.getTargetId()); } ) .verifyComplete(); StepVerifier.create(asyncClient.createRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, floorTwinContainedInRelationshipPayload, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_PRECON_FAILED)); StepVerifier .create(asyncClient.updateRelationshipWithResponse(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorContainsRoomUpdatePayload, null)) .assertNext( voidDigitalTwinsResponse -> { assertThat(voidDigitalTwinsResponse.getStatusCode()) .as("Updated relationship floor -> room") .isEqualTo(HTTP_NO_CONTENT); logger.info("Updated {} relationship successfully in source {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); } ) .verifyComplete(); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, BasicRelationship.class)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getId()) .isEqualTo(FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID) .as("Retrieved floor -> room relationship"); logger.info("Retrieved {} relationship under source {}", basicRelationship.getId(), basicRelationship.getSourceId()); }) .verifyComplete(); List<String> incomingRelationshipsSourceIds = new ArrayList<>(); StepVerifier .create(asyncClient.listIncomingRelationships(floorTwinId, null)) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .assertNext(incomingRelationship -> incomingRelationshipsSourceIds.add(incomingRelationship.getSourceId())) .expectComplete() .verify(); assertThat(incomingRelationshipsSourceIds) .as("Floor has incoming relationships from room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved incoming relationships for {}, found sources {}", floorTwinId, Arrays.toString(incomingRelationshipsSourceIds.toArray())); List<String> relationshipsTargetIds = new ArrayList<>(); StepVerifier 
.create(asyncClient.listRelationships(floorTwinId, BasicRelationship.class)) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .assertNext(basicRelationship -> relationshipsTargetIds.add(basicRelationship.getTargetId())) .expectComplete() .verify(); assertThat(relationshipsTargetIds) .as("Floor has a relationship to room and hvac") .containsExactlyInAnyOrder(roomTwinId, hvacTwinId); logger.info("Retrieved all relationships for {}, found targets {}", floorTwinId, Arrays.toString(relationshipsTargetIds.toArray())); StepVerifier .create(asyncClient.listRelationships(roomTwinId, CONTAINED_IN_RELATIONSHIP, BasicRelationship.class, null)) .assertNext(basicRelationship -> { assertThat(basicRelationship.getName()) .isEqualTo(CONTAINED_IN_RELATIONSHIP) .as("Room has only one containedIn relationship to floor"); assertThat(basicRelationship.getTargetId()) .isEqualTo(floorTwinId) .as("Room has only one containedIn relationship to floor"); logger.info("Retrieved relationship {} for twin {}", basicRelationship.getId(), roomTwinId); }) .expectComplete() .verify(); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(roomTwinId, ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", ROOM_CONTAINED_IN_FLOOR_RELATIONSHIP_ID, roomTwinId); StepVerifier .create(asyncClient.deleteRelationship(floorTwinId, FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", FLOOR_COOLED_BY_HVAC_RELATIONSHIP_ID, floorTwinId); StepVerifier .create(asyncClient.deleteRelationship(hvacTwinId, HVAC_COOLS_FLOOR_RELATIONSHIP_ID)) .verifyComplete(); logger.info("Deleted relationship {} for twin {}", HVAC_COOLS_FLOOR_RELATIONSHIP_ID, 
hvacTwinId); StepVerifier .create(asyncClient.getRelationship(floorTwinId, FLOOR_CONTAINS_ROOM_RELATIONSHIP_ID, String.class)) .verifyErrorSatisfies(ex -> assertRestException(ex, HTTP_NOT_FOUND)); } finally { try { logger.info("Cleaning up test resources."); logger.info("Deleting created relationships."); List<BasicRelationship> relationships = new ArrayList<>(); asyncClient.listRelationships(floorTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(roomTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); asyncClient.listRelationships(hvacTwinId, BasicRelationship.class) .doOnNext(relationships::add) .blockLast(); relationships.forEach(basicRelationship -> asyncClient.deleteRelationship(basicRelationship.getSourceId(), basicRelationship.getId()).block()); logger.info("Deleting created digital twins."); asyncClient.deleteDigitalTwin(floorTwinId).block(); asyncClient.deleteDigitalTwin(roomTwinId).block(); asyncClient.deleteDigitalTwin(hvacTwinId).block(); logger.info("Deleting created models."); asyncClient.deleteModel(floorModelId).block(); asyncClient.deleteModel(roomModelId).block(); asyncClient.deleteModel(hvacModelId).block(); } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override private void createModelsAndTwins(DigitalTwinsAsyncClient asyncClient, String floorModelId, String roomModelId, String hvacModelId, String floorTwinId, String roomTwinId, String hvacTwinId) throws JsonProcessingException { createModelsRunner( floorModelId, roomModelId, hvacModelId, modelsList -> StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete()); createFloorTwinRunner( floorTwinId, floorModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, 
BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createRoomTwinRunner( roomTwinId, roomModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); createHvacTwinRunner( hvacTwinId, hvacModelId, (twinId, twin) -> StepVerifier .create(asyncClient.createDigitalTwin(twinId, twin, BasicDigitalTwin.class)) .assertNext(basicDigitalTwin -> logger.info("Created {} twin successfully", basicDigitalTwin.getId())) .verifyComplete()); } }
This wasn't GA'd, right?
/**
 * Sets the share's properties by delegating to the {@code Response}-returning overload,
 * {@code setPropertiesWithResponse(ShareSetPropertiesOptions)}, and unwrapping the value.
 *
 * @param options {@link ShareSetPropertiesOptions} describing the properties to set.
 * @return A {@link Mono} that emits the {@link ShareInfo} from the service response, or an
 * error signal if the call could not be initiated.
 */
public Mono<ShareInfo> setProperties(ShareSetPropertiesOptions options) {
    try {
        Mono<Response<ShareInfo>> responseMono = setPropertiesWithResponse(options);
        return responseMono.map(Response::getValue);
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono instead of throwing to the caller,
        // keeping errors inside the reactive pipeline.
        return monoError(logger, ex);
    }
}
} catch (RuntimeException ex) {
/**
 * Sets the share's properties.
 *
 * <p>Delegates to {@code setPropertiesWithResponse(ShareSetPropertiesOptions)} and unwraps the
 * {@link Response} value.</p>
 *
 * @param options {@link ShareSetPropertiesOptions} describing the properties to set.
 * @return A {@link Mono} that emits the {@link ShareInfo} from the service response, or an error
 * signal if the request could not be initiated.
 */
public Mono<ShareInfo> setProperties(ShareSetPropertiesOptions options) {
    try {
        // Delegate to the Response-returning overload and unwrap the payload.
        return setPropertiesWithResponse(options).map(Response::getValue);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline rather than throwing.
        return monoError(logger, ex);
    }
}
class ShareAsyncClient { private final ClientLogger logger = new ClientLogger(ShareAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String snapshot; private final String accountName; private final ShareServiceVersion serviceVersion; /** * Creates a ShareAsyncClient that sends requests to the storage share at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the * {@code azureFileStorageClient}. * * @param client Client that interacts with the service interfaces * @param shareName Name of the share */ ShareAsyncClient(AzureFileStorageImpl client, String shareName, String snapshot, String accountName, ShareServiceVersion serviceVersion) { Objects.requireNonNull(shareName, "'shareName' cannot be null."); this.shareName = shareName; this.snapshot = snapshot; this.accountName = accountName; this.azureFileStorageClient = client; this.serviceVersion = serviceVersion; } /** * Get the url of the storage share client. * * @return the url of the Storage Share. */ public String getShareUrl() { StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName); if (snapshot != null) { shareUrlString.append("?sharesnapshot=").append(snapshot); } return shareUrlString.toString(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public ShareServiceVersion getServiceVersion() { return serviceVersion; } /** * Constructs a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share. 
* * <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient * azureFileStorageClient will need to be called before interaction with the directory can happen.</p> * * @return a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share */ public ShareDirectoryAsyncClient getRootDirectoryClient() { return getDirectoryClient(""); } /** * Constructs a {@link ShareDirectoryAsyncClient} that interacts with the specified directory. * * <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient * azureFileStorageClient will need to be called before interaction with the directory can happen.</p> * * @param directoryName Name of the directory * @return a {@link ShareDirectoryAsyncClient} that interacts with the directory in the share */ public ShareDirectoryAsyncClient getDirectoryClient(String directoryName) { return new ShareDirectoryAsyncClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName, serviceVersion); } /** * Constructs a {@link ShareFileAsyncClient} that interacts with the specified file. * * <p>If the file doesn't exist in the share {@link ShareFileAsyncClient
class ShareAsyncClient { private final ClientLogger logger = new ClientLogger(ShareAsyncClient.class); private final AzureFileStorageImpl azureFileStorageClient; private final String shareName; private final String snapshot; private final String accountName; private final ShareServiceVersion serviceVersion; /** * Creates a ShareAsyncClient that sends requests to the storage share at {@link AzureFileStorageImpl * endpoint}. Each service call goes through the {@link HttpPipeline pipeline} in the * {@code azureFileStorageClient}. * * @param client Client that interacts with the service interfaces * @param shareName Name of the share */ ShareAsyncClient(AzureFileStorageImpl client, String shareName, String snapshot, String accountName, ShareServiceVersion serviceVersion) { Objects.requireNonNull(shareName, "'shareName' cannot be null."); this.shareName = shareName; this.snapshot = snapshot; this.accountName = accountName; this.azureFileStorageClient = client; this.serviceVersion = serviceVersion; } /** * Get the url of the storage share client. * * @return the url of the Storage Share. */ public String getShareUrl() { StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName); if (snapshot != null) { shareUrlString.append("?sharesnapshot=").append(snapshot); } return shareUrlString.toString(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public ShareServiceVersion getServiceVersion() { return serviceVersion; } /** * Constructs a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share. 
* * <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient * azureFileStorageClient will need to be called before interaction with the directory can happen.</p> * * @return a {@link ShareDirectoryAsyncClient} that interacts with the root directory in the share */ public ShareDirectoryAsyncClient getRootDirectoryClient() { return getDirectoryClient(""); } /** * Constructs a {@link ShareDirectoryAsyncClient} that interacts with the specified directory. * * <p>If the directory doesn't exist in the share {@link ShareDirectoryAsyncClient * azureFileStorageClient will need to be called before interaction with the directory can happen.</p> * * @param directoryName Name of the directory * @return a {@link ShareDirectoryAsyncClient} that interacts with the directory in the share */ public ShareDirectoryAsyncClient getDirectoryClient(String directoryName) { return new ShareDirectoryAsyncClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName, serviceVersion); } /** * Constructs a {@link ShareFileAsyncClient} that interacts with the specified file. * * <p>If the file doesn't exist in the share {@link ShareFileAsyncClient